VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@66457

Last change on this file since 66457 was 66457, checked in by vboxsync, 8 years ago

IEM: Limited xsave and xrstor implementation. Implemented vstmxcsr.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 590.1 KB
1/* $Id: IEMAll.cpp 66457 2017-04-06 10:44:30Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
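As a rough illustration of how the levels listed above show up in practice, a hypothetical handler might log like this (a sketch only; Log, LogFlow, Log4 and Log8 are the standard VBox logging macros, but the messages and the handler name are made up):

    /* Sketch, not part of the file: typical use of the IEM logging levels. */
    LogFlow(("iemDemoHandler: enter cs:rip=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip)); /* Flow:    enter/exit info   */
    Log4(("decode - xor eax,eax at %RGv\n", GCPtrPC));                               /* Level 4: mnemonic w/ EIP   */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));                                 /* Level 8: memory writes     */
    Log(("iemDemoHandler: raising #GP(0)\n"));                                       /* Level 1: major events      */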
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
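To make the relationship between these pieces concrete, here is a minimal, hypothetical decoder stub built with the macros above (a sketch, not code from this file; iemOp_demo_nop is invented):

    /* Define a handler with the calling convention selected above... */
    FNIEMOP_DEF(iemOp_demo_nop)
    {
        /* A real handler decodes its operands, updates guest state and
           advances RIP; here we only return a strict status code. */
        return VINF_SUCCESS;
    }

    /* ...and dispatch to it through a PFNIEMOP pointer, e.g. from an opcode map: */
    VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[0x90]);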
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
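A typical consumer fetches a descriptor and then inspects it through the view matching the current mode; a hedged sketch with error handling trimmed (iemMemFetchSelDesc and the raise helper are the functions forward-declared further down):

    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1Present)              /* the legacy view covers the P bit and DPL */
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
    uint8_t const uDpl = Desc.Legacy.Gen.u2Dpl;  /* Desc.Long is used for 16-byte system descriptors in long mode */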
218
219/*********************************************************************************************************************************
220* Defined Constants And Macros *
221*********************************************************************************************************************************/
222/** @def IEM_WITH_SETJMP
223 * Enables alternative status code handling using setjmps.
224 *
225 * This adds a bit of expense via the setjmp() call since it saves all the
226 * non-volatile registers. However, it eliminates return code checks and allows
227 * for more optimal return value passing (return regs instead of stack buffer).
228 */
229#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
230# define IEM_WITH_SETJMP
231#endif
232
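For orientation, the two styles this define chooses between look roughly as follows (simplified sketch; the *Jmp fetch variant is assumed to exist only when IEM_WITH_SETJMP is defined):

    /* Status-code style: every helper returns a VBOXSTRICTRC that must be checked. */
    uint32_t     u32Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* setjmp style: the outer decoder does a setjmp() once and helpers longjmp()
       back on failure, so the fetched value can simply be returned in a register. */
    uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);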
233/** Temporary hack to disable the double execution. Will be removed in favor
234 * of a dedicated execution mode in EM. */
235//#define IEM_VERIFICATION_MODE_NO_REM
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
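These are typically dropped into operand-size switches, e.g. (hypothetical fragment):

    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: cbValue = 2; break;
        case IEMMODE_32BIT: cbValue = 4; break;
        case IEMMODE_64BIT: cbValue = 8; break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* silences the 'may be used uninitialized' warning */
    }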
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
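On current 48-bit implementations "canonical" means bits 63:48 are copies of bit 47; one common way to express the check is shown below (an equivalent sketch, not necessarily how X86_IS_CANONICAL is written):

    /* Adding 2^47 folds both canonical halves into one contiguous 2^48-sized range. */
    #define DEMO_IS_CANONICAL(a_u64Addr) \
        ( (uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000) )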
362/** @def IEM_USE_UNALIGNED_DATA_ACCESS
363 * Use unaligned accesses instead of elaborate byte assembly. */
364#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
365# define IEM_USE_UNALIGNED_DATA_ACCESS
366#endif
367
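The difference the define makes is roughly the following (hypothetical helper fragment; RT_MAKE_U32_FROM_U8 is the IPRT byte-assembly macro):

    uint32_t u32;
    #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u32 = *(uint32_t const *)pbSrc;      /* x86/AMD64 handle misaligned loads in hardware */
    #else
        u32 = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]); /* byte assembly */
    #endif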
368#ifdef VBOX_WITH_NESTED_HWVIRT
369/**
370 * Check the common SVM instruction preconditions.
371 */
372#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
373 do { \
374 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
375 { \
376 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
377 return iemRaiseUndefinedOpcode(pVCpu); \
378 } \
379 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
380 { \
381 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
382 return iemRaiseUndefinedOpcode(pVCpu); \
383 } \
384 if (pVCpu->iem.s.uCpl != 0) \
385 { \
386 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
387 return iemRaiseGeneralProtectionFault0(pVCpu); \
388 } \
389 } while (0)
390
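A hypothetical SVM instruction implementation would run these checks before testing its own intercept; a hedged sketch (the handler name is invented and the intercept constant name is an assumption about hm_svm.h):

    IEM_CIMPL_DEF_0(iemCImpl_demo_svm_insn)
    {
        IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, demo_svm_insn);  /* #UD / #GP(0) preconditions from above */
        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
        {
            /* ...trigger the corresponding #VMEXIT here... */
        }
        return VINF_SUCCESS;
    }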
391/**
392 * Check if SVM is enabled.
393 */
394#define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if an SVM control/instruction intercept is set.
398 */
399#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
400
401/**
402 * Check if an SVM read CRx intercept is set.
403 */
404#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
405
406/**
407 * Check if an SVM write CRx intercept is set.
408 */
409#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
410
411/**
412 * Check if an SVM read DRx intercept is set.
413 */
414#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
415
416/**
417 * Check if an SVM write DRx intercept is set.
418 */
419#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
420
421/**
422 * Check if an SVM exception intercept is set.
423 */
424#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
425#endif /* VBOX_WITH_NESTED_HWVIRT */
426
427
428/*********************************************************************************************************************************
429* Global Variables *
430*********************************************************************************************************************************/
431extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
432
433
434/** Function table for the ADD instruction. */
435IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
436{
437 iemAImpl_add_u8, iemAImpl_add_u8_locked,
438 iemAImpl_add_u16, iemAImpl_add_u16_locked,
439 iemAImpl_add_u32, iemAImpl_add_u32_locked,
440 iemAImpl_add_u64, iemAImpl_add_u64_locked
441};
442
443/** Function table for the ADC instruction. */
444IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
445{
446 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
447 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
448 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
449 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
450};
451
452/** Function table for the SUB instruction. */
453IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
454{
455 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
456 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
457 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
458 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
459};
460
461/** Function table for the SBB instruction. */
462IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
463{
464 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
465 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
466 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
467 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
468};
469
470/** Function table for the OR instruction. */
471IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
472{
473 iemAImpl_or_u8, iemAImpl_or_u8_locked,
474 iemAImpl_or_u16, iemAImpl_or_u16_locked,
475 iemAImpl_or_u32, iemAImpl_or_u32_locked,
476 iemAImpl_or_u64, iemAImpl_or_u64_locked
477};
478
479/** Function table for the XOR instruction. */
480IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
481{
482 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
483 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
484 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
485 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
486};
487
488/** Function table for the AND instruction. */
489IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
490{
491 iemAImpl_and_u8, iemAImpl_and_u8_locked,
492 iemAImpl_and_u16, iemAImpl_and_u16_locked,
493 iemAImpl_and_u32, iemAImpl_and_u32_locked,
494 iemAImpl_and_u64, iemAImpl_and_u64_locked
495};
496
497/** Function table for the CMP instruction.
498 * @remarks Making operand order ASSUMPTIONS.
499 */
500IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
501{
502 iemAImpl_cmp_u8, NULL,
503 iemAImpl_cmp_u16, NULL,
504 iemAImpl_cmp_u32, NULL,
505 iemAImpl_cmp_u64, NULL
506};
507
508/** Function table for the TEST instruction.
509 * @remarks Making operand order ASSUMPTIONS.
510 */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
512{
513 iemAImpl_test_u8, NULL,
514 iemAImpl_test_u16, NULL,
515 iemAImpl_test_u32, NULL,
516 iemAImpl_test_u64, NULL
517};
518
519/** Function table for the BT instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
521{
522 NULL, NULL,
523 iemAImpl_bt_u16, NULL,
524 iemAImpl_bt_u32, NULL,
525 iemAImpl_bt_u64, NULL
526};
527
528/** Function table for the BTC instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
530{
531 NULL, NULL,
532 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
533 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
534 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
535};
536
537/** Function table for the BTR instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
539{
540 NULL, NULL,
541 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
542 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
543 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
544};
545
546/** Function table for the BTS instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
548{
549 NULL, NULL,
550 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
551 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
552 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
553};
554
555/** Function table for the BSF instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
557{
558 NULL, NULL,
559 iemAImpl_bsf_u16, NULL,
560 iemAImpl_bsf_u32, NULL,
561 iemAImpl_bsf_u64, NULL
562};
563
564/** Function table for the BSR instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
566{
567 NULL, NULL,
568 iemAImpl_bsr_u16, NULL,
569 iemAImpl_bsr_u32, NULL,
570 iemAImpl_bsr_u64, NULL
571};
572
573/** Function table for the IMUL instruction. */
574IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
575{
576 NULL, NULL,
577 iemAImpl_imul_two_u16, NULL,
578 iemAImpl_imul_two_u32, NULL,
579 iemAImpl_imul_two_u64, NULL
580};
581
582/** Group 1 /r lookup table. */
583IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
584{
585 &g_iemAImpl_add,
586 &g_iemAImpl_or,
587 &g_iemAImpl_adc,
588 &g_iemAImpl_sbb,
589 &g_iemAImpl_and,
590 &g_iemAImpl_sub,
591 &g_iemAImpl_xor,
592 &g_iemAImpl_cmp
593};
594
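The decoder for the 0x80-0x83 group-1 opcodes indexes this table with the reg field of the ModR/M byte; roughly like this (hypothetical fragment, assuming the usual opcode-fetch and ModR/M accessor macros):

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];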
595/** Function table for the INC instruction. */
596IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
597{
598 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
599 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
600 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
601 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
602};
603
604/** Function table for the DEC instruction. */
605IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
606{
607 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
608 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
609 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
610 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
611};
612
613/** Function table for the NEG instruction. */
614IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
615{
616 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
617 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
618 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
619 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
620};
621
622/** Function table for the NOT instruction. */
623IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
624{
625 iemAImpl_not_u8, iemAImpl_not_u8_locked,
626 iemAImpl_not_u16, iemAImpl_not_u16_locked,
627 iemAImpl_not_u32, iemAImpl_not_u32_locked,
628 iemAImpl_not_u64, iemAImpl_not_u64_locked
629};
630
631
632/** Function table for the ROL instruction. */
633IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
634{
635 iemAImpl_rol_u8,
636 iemAImpl_rol_u16,
637 iemAImpl_rol_u32,
638 iemAImpl_rol_u64
639};
640
641/** Function table for the ROR instruction. */
642IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
643{
644 iemAImpl_ror_u8,
645 iemAImpl_ror_u16,
646 iemAImpl_ror_u32,
647 iemAImpl_ror_u64
648};
649
650/** Function table for the RCL instruction. */
651IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
652{
653 iemAImpl_rcl_u8,
654 iemAImpl_rcl_u16,
655 iemAImpl_rcl_u32,
656 iemAImpl_rcl_u64
657};
658
659/** Function table for the RCR instruction. */
660IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
661{
662 iemAImpl_rcr_u8,
663 iemAImpl_rcr_u16,
664 iemAImpl_rcr_u32,
665 iemAImpl_rcr_u64
666};
667
668/** Function table for the SHL instruction. */
669IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
670{
671 iemAImpl_shl_u8,
672 iemAImpl_shl_u16,
673 iemAImpl_shl_u32,
674 iemAImpl_shl_u64
675};
676
677/** Function table for the SHR instruction. */
678IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
679{
680 iemAImpl_shr_u8,
681 iemAImpl_shr_u16,
682 iemAImpl_shr_u32,
683 iemAImpl_shr_u64
684};
685
686/** Function table for the SAR instruction. */
687IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
688{
689 iemAImpl_sar_u8,
690 iemAImpl_sar_u16,
691 iemAImpl_sar_u32,
692 iemAImpl_sar_u64
693};
694
695
696/** Function table for the MUL instruction. */
697IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
698{
699 iemAImpl_mul_u8,
700 iemAImpl_mul_u16,
701 iemAImpl_mul_u32,
702 iemAImpl_mul_u64
703};
704
705/** Function table for the IMUL instruction working implicitly on rAX. */
706IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
707{
708 iemAImpl_imul_u8,
709 iemAImpl_imul_u16,
710 iemAImpl_imul_u32,
711 iemAImpl_imul_u64
712};
713
714/** Function table for the DIV instruction. */
715IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
716{
717 iemAImpl_div_u8,
718 iemAImpl_div_u16,
719 iemAImpl_div_u32,
720 iemAImpl_div_u64
721};
722
723/** Function table for the IDIV instruction. */
724IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
725{
726 iemAImpl_idiv_u8,
727 iemAImpl_idiv_u16,
728 iemAImpl_idiv_u32,
729 iemAImpl_idiv_u64
730};
731
732/** Function table for the SHLD instruction */
733IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
734{
735 iemAImpl_shld_u16,
736 iemAImpl_shld_u32,
737 iemAImpl_shld_u64,
738};
739
740/** Function table for the SHRD instruction */
741IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
742{
743 iemAImpl_shrd_u16,
744 iemAImpl_shrd_u32,
745 iemAImpl_shrd_u64,
746};
747
748
749/** Function table for the PUNPCKLBW instruction */
750IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
751/** Function table for the PUNPCKLWD instruction */
752IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
753/** Function table for the PUNPCKLDQ instruction */
754IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
755/** Function table for the PUNPCKLQDQ instruction */
756IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
757
758/** Function table for the PUNPCKHBW instruction */
759IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
760/** Function table for the PUNPCKHWD instruction */
761IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
762/** Function table for the PUNPCKHDQ instruction */
763IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
764/** Function table for the PUNPCKHQDQ instruction */
765IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
766
767/** Function table for the PXOR instruction */
768IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
769/** Function table for the PCMPEQB instruction */
770IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
771/** Function table for the PCMPEQW instruction */
772IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
773/** Function table for the PCMPEQD instruction */
774IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
775
776
777#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
778/** What IEM just wrote. */
779uint8_t g_abIemWrote[256];
780/** How much IEM just wrote. */
781size_t g_cbIemWrote;
782#endif
783
784
785/*********************************************************************************************************************************
786* Internal Functions *
787*********************************************************************************************************************************/
788IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
789IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
790IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
791IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
792/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
793IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
794IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
795IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
796IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
797IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
798IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
799IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
800IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
801IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
802IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
803IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
804IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
805#ifdef IEM_WITH_SETJMP
806DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
807DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
808DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
809DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
810DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
811#endif
812
813IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
814IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
815IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
816IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
817IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
818IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
820IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
821IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
822IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
823IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
824IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
825IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
826IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
827IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
828IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
829
830#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
831IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
832#endif
833IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
834IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
835
836
837
838/**
839 * Sets the pass up status.
840 *
841 * @returns VINF_SUCCESS.
842 * @param pVCpu The cross context virtual CPU structure of the
843 * calling thread.
844 * @param rcPassUp The pass up status. Must be informational.
845 * VINF_SUCCESS is not allowed.
846 */
847IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
848{
849 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
850
851 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
852 if (rcOldPassUp == VINF_SUCCESS)
853 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
854 /* If both are EM scheduling codes, use EM priority rules. */
855 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
856 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
857 {
858 if (rcPassUp < rcOldPassUp)
859 {
860 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
861 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
862 }
863 else
864 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
865 }
866 /* Override EM scheduling with specific status code. */
867 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
868 {
869 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
870 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
871 }
872 /* Don't override specific status code, first come first served. */
873 else
874 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
875 return VINF_SUCCESS;
876}
877
878
879/**
880 * Calculates the CPU mode.
881 *
882 * This is mainly for updating IEMCPU::enmCpuMode.
883 *
884 * @returns CPU mode.
885 * @param pCtx The register context for the CPU.
886 */
887DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
888{
889 if (CPUMIsGuestIn64BitCodeEx(pCtx))
890 return IEMMODE_64BIT;
891 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
892 return IEMMODE_32BIT;
893 return IEMMODE_16BIT;
894}
895
896
897/**
898 * Initializes the execution state.
899 *
900 * @param pVCpu The cross context virtual CPU structure of the
901 * calling thread.
902 * @param fBypassHandlers Whether to bypass access handlers.
903 *
904 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
905 * side-effects in strict builds.
906 */
907DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
908{
909 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
910
911 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
912
913#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
915 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
917 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
918 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
920 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
921 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
922#endif
923
924#ifdef VBOX_WITH_RAW_MODE_NOT_R0
925 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
926#endif
927 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
928 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
929#ifdef VBOX_STRICT
930 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
931 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
932 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
933 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
934 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
935 pVCpu->iem.s.uRexReg = 127;
936 pVCpu->iem.s.uRexB = 127;
937 pVCpu->iem.s.uRexIndex = 127;
938 pVCpu->iem.s.iEffSeg = 127;
939 pVCpu->iem.s.idxPrefix = 127;
940 pVCpu->iem.s.uVex3rdReg = 127;
941 pVCpu->iem.s.uVexLength = 127;
942 pVCpu->iem.s.fEvexStuff = 127;
943 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
944# ifdef IEM_WITH_CODE_TLB
945 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
946 pVCpu->iem.s.pbInstrBuf = NULL;
947 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
948 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
949 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
950 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
951# else
952 pVCpu->iem.s.offOpcode = 127;
953 pVCpu->iem.s.cbOpcode = 127;
954# endif
955#endif
956
957 pVCpu->iem.s.cActiveMappings = 0;
958 pVCpu->iem.s.iNextMapping = 0;
959 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
960 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
963 && pCtx->cs.u64Base == 0
964 && pCtx->cs.u32Limit == UINT32_MAX
965 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
966 if (!pVCpu->iem.s.fInPatchCode)
967 CPUMRawLeave(pVCpu, VINF_SUCCESS);
968#endif
969
970#ifdef IEM_VERIFICATION_MODE_FULL
971 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
972 pVCpu->iem.s.fNoRem = true;
973#endif
974}
975
976
977/**
978 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
979 *
980 * @param pVCpu The cross context virtual CPU structure of the
981 * calling thread.
982 */
983DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
984{
985 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
986#ifdef IEM_VERIFICATION_MODE_FULL
987 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
988#endif
989#ifdef VBOX_STRICT
990# ifdef IEM_WITH_CODE_TLB
991 NOREF(pVCpu);
992# else
993 pVCpu->iem.s.cbOpcode = 0;
994# endif
995#else
996 NOREF(pVCpu);
997#endif
998}
999
1000
1001/**
1002 * Initializes the decoder state.
1003 *
1004 * iemReInitDecoder is mostly a copy of this function.
1005 *
1006 * @param pVCpu The cross context virtual CPU structure of the
1007 * calling thread.
1008 * @param fBypassHandlers Whether to bypass access handlers.
1009 */
1010DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1011{
1012 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1013
1014 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1015
1016#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1025#endif
1026
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1029#endif
1030 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1031#ifdef IEM_VERIFICATION_MODE_FULL
1032 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1033 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1034#endif
1035 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1036 pVCpu->iem.s.enmCpuMode = enmMode;
1037 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1038 pVCpu->iem.s.enmEffAddrMode = enmMode;
1039 if (enmMode != IEMMODE_64BIT)
1040 {
1041 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1042 pVCpu->iem.s.enmEffOpSize = enmMode;
1043 }
1044 else
1045 {
1046 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1047 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1048 }
1049 pVCpu->iem.s.fPrefixes = 0;
1050 pVCpu->iem.s.uRexReg = 0;
1051 pVCpu->iem.s.uRexB = 0;
1052 pVCpu->iem.s.uRexIndex = 0;
1053 pVCpu->iem.s.idxPrefix = 0;
1054 pVCpu->iem.s.uVex3rdReg = 0;
1055 pVCpu->iem.s.uVexLength = 0;
1056 pVCpu->iem.s.fEvexStuff = 0;
1057 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1058#ifdef IEM_WITH_CODE_TLB
1059 pVCpu->iem.s.pbInstrBuf = NULL;
1060 pVCpu->iem.s.offInstrNextByte = 0;
1061 pVCpu->iem.s.offCurInstrStart = 0;
1062# ifdef VBOX_STRICT
1063 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1064 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1065 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1066# endif
1067#else
1068 pVCpu->iem.s.offOpcode = 0;
1069 pVCpu->iem.s.cbOpcode = 0;
1070#endif
1071 pVCpu->iem.s.cActiveMappings = 0;
1072 pVCpu->iem.s.iNextMapping = 0;
1073 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1074 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1076 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1077 && pCtx->cs.u64Base == 0
1078 && pCtx->cs.u32Limit == UINT32_MAX
1079 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1080 if (!pVCpu->iem.s.fInPatchCode)
1081 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1082#endif
1083
1084#ifdef DBGFTRACE_ENABLED
1085 switch (enmMode)
1086 {
1087 case IEMMODE_64BIT:
1088 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1089 break;
1090 case IEMMODE_32BIT:
1091 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1092 break;
1093 case IEMMODE_16BIT:
1094 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1095 break;
1096 }
1097#endif
1098}
1099
1100
1101/**
1102 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1103 *
1104 * This is mostly a copy of iemInitDecoder.
1105 *
1106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1107 */
1108DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1109{
1110 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1111
1112 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1113
1114#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1123#endif
1124
1125 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1126#ifdef IEM_VERIFICATION_MODE_FULL
1127 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1128 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1129#endif
1130 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1131 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1132 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1133 pVCpu->iem.s.enmEffAddrMode = enmMode;
1134 if (enmMode != IEMMODE_64BIT)
1135 {
1136 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1137 pVCpu->iem.s.enmEffOpSize = enmMode;
1138 }
1139 else
1140 {
1141 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1142 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1143 }
1144 pVCpu->iem.s.fPrefixes = 0;
1145 pVCpu->iem.s.uRexReg = 0;
1146 pVCpu->iem.s.uRexB = 0;
1147 pVCpu->iem.s.uRexIndex = 0;
1148 pVCpu->iem.s.idxPrefix = 0;
1149 pVCpu->iem.s.uVex3rdReg = 0;
1150 pVCpu->iem.s.uVexLength = 0;
1151 pVCpu->iem.s.fEvexStuff = 0;
1152 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1153#ifdef IEM_WITH_CODE_TLB
1154 if (pVCpu->iem.s.pbInstrBuf)
1155 {
1156 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1157 - pVCpu->iem.s.uInstrBufPc;
1158 if (off < pVCpu->iem.s.cbInstrBufTotal)
1159 {
1160 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1161 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1162 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1163 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1164 else
1165 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1166 }
1167 else
1168 {
1169 pVCpu->iem.s.pbInstrBuf = NULL;
1170 pVCpu->iem.s.offInstrNextByte = 0;
1171 pVCpu->iem.s.offCurInstrStart = 0;
1172 pVCpu->iem.s.cbInstrBuf = 0;
1173 pVCpu->iem.s.cbInstrBufTotal = 0;
1174 }
1175 }
1176 else
1177 {
1178 pVCpu->iem.s.offInstrNextByte = 0;
1179 pVCpu->iem.s.offCurInstrStart = 0;
1180 pVCpu->iem.s.cbInstrBuf = 0;
1181 pVCpu->iem.s.cbInstrBufTotal = 0;
1182 }
1183#else
1184 pVCpu->iem.s.cbOpcode = 0;
1185 pVCpu->iem.s.offOpcode = 0;
1186#endif
1187 Assert(pVCpu->iem.s.cActiveMappings == 0);
1188 pVCpu->iem.s.iNextMapping = 0;
1189 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1190 Assert(pVCpu->iem.s.fBypassHandlers == false);
1191#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1192 if (!pVCpu->iem.s.fInPatchCode)
1193 { /* likely */ }
1194 else
1195 {
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202 }
1203#endif
1204
1205#ifdef DBGFTRACE_ENABLED
1206 switch (enmMode)
1207 {
1208 case IEMMODE_64BIT:
1209 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1210 break;
1211 case IEMMODE_32BIT:
1212 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1213 break;
1214 case IEMMODE_16BIT:
1215 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1216 break;
1217 }
1218#endif
1219}
1220
1221
1222
1223/**
1224 * Prefetches opcodes the first time execution is started.
1225 *
1226 * @returns Strict VBox status code.
1227 * @param pVCpu The cross context virtual CPU structure of the
1228 * calling thread.
1229 * @param fBypassHandlers Whether to bypass access handlers.
1230 */
1231IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1232{
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1235#endif
1236 iemInitDecoder(pVCpu, fBypassHandlers);
1237
1238#ifdef IEM_WITH_CODE_TLB
1239 /** @todo Do ITLB lookup here. */
1240
1241#else /* !IEM_WITH_CODE_TLB */
1242
1243 /*
1244 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1245 *
1246 * First translate CS:rIP to a physical address.
1247 */
1248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1249 uint32_t cbToTryRead;
1250 RTGCPTR GCPtrPC;
1251 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1252 {
1253 cbToTryRead = PAGE_SIZE;
1254 GCPtrPC = pCtx->rip;
1255 if (IEM_IS_CANONICAL(GCPtrPC))
1256 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1257 else
1258 return iemRaiseGeneralProtectionFault0(pVCpu);
1259 }
1260 else
1261 {
1262 uint32_t GCPtrPC32 = pCtx->eip;
1263 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1264 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1265 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1266 else
1267 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1268 if (cbToTryRead) { /* likely */ }
1269 else /* overflowed */
1270 {
1271 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1272 cbToTryRead = UINT32_MAX;
1273 }
1274 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1275 Assert(GCPtrPC <= UINT32_MAX);
1276 }
1277
1278# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1279 /* Allow interpretation of patch manager code blocks since they can for
1280 instance throw #PFs for perfectly good reasons. */
1281 if (pVCpu->iem.s.fInPatchCode)
1282 {
1283 size_t cbRead = 0;
1284 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1285 AssertRCReturn(rc, rc);
1286 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1287 return VINF_SUCCESS;
1288 }
1289# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1290
1291 RTGCPHYS GCPhys;
1292 uint64_t fFlags;
1293 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1294 if (RT_SUCCESS(rc)) { /* probable */ }
1295 else
1296 {
1297 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1298 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1299 }
1300 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1301 else
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1304 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1305 }
1306 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1307 else
1308 {
1309 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1310 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1311 }
1312 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1313 /** @todo Check reserved bits and such stuff. PGM is better at doing
1314 * that, so do it when implementing the guest virtual address
1315 * TLB... */
1316
1317# ifdef IEM_VERIFICATION_MODE_FULL
1318 /*
1319 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1320 * instruction.
1321 */
1322 /** @todo optimize this differently by not using PGMPhysRead. */
1323 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1324 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1325 if ( offPrevOpcodes < cbOldOpcodes
1326 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1327 {
1328 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1329 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1330 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1331 pVCpu->iem.s.cbOpcode = cbNew;
1332 return VINF_SUCCESS;
1333 }
1334# endif
1335
1336 /*
1337 * Read the bytes at this address.
1338 */
1339 PVM pVM = pVCpu->CTX_SUFF(pVM);
1340# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1341 size_t cbActual;
1342 if ( PATMIsEnabled(pVM)
1343 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1344 {
1345 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1346 Assert(cbActual > 0);
1347 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1348 }
1349 else
1350# endif
1351 {
1352 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1353 if (cbToTryRead > cbLeftOnPage)
1354 cbToTryRead = cbLeftOnPage;
1355 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1356 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1357
1358 if (!pVCpu->iem.s.fBypassHandlers)
1359 {
1360 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1361 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1362 { /* likely */ }
1363 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1364 {
1365 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1366 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1367 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1368 }
1369 else
1370 {
1371 Log((RT_SUCCESS(rcStrict)
1372 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1373 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1374 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1375 return rcStrict;
1376 }
1377 }
1378 else
1379 {
1380 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1381 if (RT_SUCCESS(rc))
1382 { /* likely */ }
1383 else
1384 {
1385 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1386 GCPtrPC, GCPhys, rc, cbToTryRead));
1387 return rc;
1388 }
1389 }
1390 pVCpu->iem.s.cbOpcode = cbToTryRead;
1391 }
1392#endif /* !IEM_WITH_CODE_TLB */
1393 return VINF_SUCCESS;
1394}
1395
1396
1397/**
1398 * Invalidates the IEM TLBs.
1399 *
1400 * This is called internally as well as by PGM when moving GC mappings.
1401 *
1402 *
1403 * @param pVCpu The cross context virtual CPU structure of the calling
1404 * thread.
1405 * @param fVmm Set when PGM calls us with a remapping.
1406 */
1407VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1408{
1409#ifdef IEM_WITH_CODE_TLB
1410 pVCpu->iem.s.cbInstrBufTotal = 0;
1411 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1412 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1413 { /* very likely */ }
1414 else
1415 {
1416 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1417 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1418 while (i-- > 0)
1419 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1420 }
1421#endif
1422
1423#ifdef IEM_WITH_DATA_TLB
1424 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1425 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1426 { /* very likely */ }
1427 else
1428 {
1429 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1430 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1431 while (i-- > 0)
1432 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1433 }
1434#endif
1435 NOREF(pVCpu); NOREF(fVmm);
1436}
1437
1438
1439/**
1440 * Invalidates a page in the TLBs.
1441 *
1442 * @param pVCpu The cross context virtual CPU structure of the calling
1443 * thread.
1444 * @param GCPtr The address of the page to invalidate
1445 */
1446VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1447{
1448#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1449 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1450 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1451 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1452 uintptr_t idx = (uint8_t)GCPtr;
1453
1454# ifdef IEM_WITH_CODE_TLB
1455 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1456 {
1457 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1458 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1459 pVCpu->iem.s.cbInstrBufTotal = 0;
1460 }
1461# endif
1462
1463# ifdef IEM_WITH_DATA_TLB
1464 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1465 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1466# endif
1467#else
1468 NOREF(pVCpu); NOREF(GCPtr);
1469#endif
1470}
1471
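For orientation: with 256 entries the lookup is direct mapped on the low 8 bits of the page number, and a hit requires the stored tag to equal the page number OR'ed with the current revision. A worked example (the address is invented):

    /* GCPtr = 0x00007fff12345678
       page number = GCPtr >> X86_PAGE_SHIFT = 0x00007fff12345
       idx         = (uint8_t)page number    = 0x45
       hit iff aEntries[0x45].uTag == (0x00007fff12345 | uTlbRevision) */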
1472
1473/**
1474 * Invalidates the host physical aspects of the IEM TLBs.
1475 *
1476 * This is called internally as well as by PGM when moving GC mappings.
1477 *
1478 * @param pVCpu The cross context virtual CPU structure of the calling
1479 * thread.
1480 */
1481VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1482{
1483#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1484 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1485
1486# ifdef IEM_WITH_CODE_TLB
1487 pVCpu->iem.s.cbInstrBufTotal = 0;
1488# endif
1489 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1490 if (uTlbPhysRev != 0)
1491 {
1492 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1493 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1494 }
1495 else
1496 {
1497 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1498 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1499
1500 unsigned i;
1501# ifdef IEM_WITH_CODE_TLB
1502 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1503 while (i-- > 0)
1504 {
1505 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1506 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1507 }
1508# endif
1509# ifdef IEM_WITH_DATA_TLB
1510 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1511 while (i-- > 0)
1512 {
1513 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1514 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1515 }
1516# endif
1517 }
1518#else
1519 NOREF(pVCpu);
1520#endif
1521}
1522
1523
1524/**
1525 * Invalidates the host physical aspects of the IEM TLBs.
1526 *
1527 * This is called internally as well as by PGM when moving GC mappings.
1528 *
1529 * @param pVM The cross context VM structure.
1530 *
1531 * @remarks Caller holds the PGM lock.
1532 */
1533VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1534{
1535 RT_NOREF_PV(pVM);
1536}
1537
1538#ifdef IEM_WITH_CODE_TLB
1539
1540/**
1541 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1542 * failure and jumps.
1543 *
1544 * We end up here for a number of reasons:
1545 * - pbInstrBuf isn't yet initialized.
1546 *      - Advancing beyond the buffer boundary (e.g. crossing a page).
1547 * - Advancing beyond the CS segment limit.
1548 * - Fetching from non-mappable page (e.g. MMIO).
1549 *
1550 * @param pVCpu The cross context virtual CPU structure of the
1551 * calling thread.
1552 * @param   cbDst               Number of bytes to read.
1553 * @param   pvDst               Where to return the bytes.
1554 *
1555 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1556 */
1557IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1558{
1559#ifdef IN_RING3
1560//__debugbreak();
1561 for (;;)
1562 {
1563 Assert(cbDst <= 8);
1564 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1565
1566 /*
1567 * We might have a partial buffer match, deal with that first to make the
1568 * rest simpler. This is the first part of the cross page/buffer case.
1569 */
1570 if (pVCpu->iem.s.pbInstrBuf != NULL)
1571 {
1572 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1573 {
1574 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1575 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1576 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1577
1578 cbDst -= cbCopy;
1579 pvDst = (uint8_t *)pvDst + cbCopy;
1580 offBuf += cbCopy;
1581                pVCpu->iem.s.offInstrNextByte += cbCopy;
1582 }
1583 }
1584
1585 /*
1586 * Check segment limit, figuring how much we're allowed to access at this point.
1587 *
1588 * We will fault immediately if RIP is past the segment limit / in non-canonical
1589 * territory. If we do continue, there are one or more bytes to read before we
1590 * end up in trouble and we need to do that first before faulting.
1591 */
1592 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1593 RTGCPTR GCPtrFirst;
1594 uint32_t cbMaxRead;
1595 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1596 {
1597 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1598 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1599 { /* likely */ }
1600 else
1601 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1602 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1603 }
1604 else
1605 {
1606 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1607 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1608 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1609 { /* likely */ }
1610 else
1611 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1612 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1613 if (cbMaxRead != 0)
1614 { /* likely */ }
1615 else
1616 {
1617 /* Overflowed because address is 0 and limit is max. */
1618 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1619 cbMaxRead = X86_PAGE_SIZE;
1620 }
1621 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1622 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1623 if (cbMaxRead2 < cbMaxRead)
1624 cbMaxRead = cbMaxRead2;
1625 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1626 }
1627
1628 /*
1629 * Get the TLB entry for this piece of code.
1630 */
1631 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1632 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1633 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
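        /* The tag combines the page number with the current TLB revision, so entries
           created before the last revision bump automatically fail to match here. */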
1634 if (pTlbe->uTag == uTag)
1635 {
1636 /* likely when executing lots of code, otherwise unlikely */
1637# ifdef VBOX_WITH_STATISTICS
1638 pVCpu->iem.s.CodeTlb.cTlbHits++;
1639# endif
1640 }
1641 else
1642 {
1643 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1644# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1645 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1646 {
1647 pTlbe->uTag = uTag;
1648            pTlbe->uTag             = uTag;
1648            pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1649                                    | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1650 pTlbe->GCPhys = NIL_RTGCPHYS;
1651 pTlbe->pbMappingR3 = NULL;
1652 }
1653 else
1654# endif
1655 {
1656 RTGCPHYS GCPhys;
1657 uint64_t fFlags;
1658 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1659 if (RT_FAILURE(rc))
1660 {
1661 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1662 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1663 }
1664
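                /* Translate the page table bits into TLB entry flags: US/RW/D are
                   inverted so they read as "no user / no write / not dirty", and the
                   NX bit is shifted down into bit 0 (IEMTLBE_F_PT_NO_EXEC). */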
1665 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1666 pTlbe->uTag = uTag;
1667 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1668 pTlbe->GCPhys = GCPhys;
1669 pTlbe->pbMappingR3 = NULL;
1670 }
1671 }
1672
1673 /*
1674 * Check TLB page table level access flags.
1675 */
1676 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1677 {
1678 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1679 {
1680 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1681 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1682 }
1683 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1684 {
1685 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1686 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1687 }
1688 }
1689
1690# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1691 /*
1692 * Allow interpretation of patch manager code blocks since they can for
1693 * instance throw #PFs for perfectly good reasons.
1694 */
1695 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1696        { /* likely */ }
1697 else
1698 {
1699            /** @todo This could be optimized a little in ring-3 if we liked. */
1700 size_t cbRead = 0;
1701 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1702 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1703 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1704 return;
1705 }
1706# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1707
1708 /*
1709 * Look up the physical page info if necessary.
1710 */
1711 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1712 { /* not necessary */ }
1713 else
1714 {
1715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1716 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1717 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1721 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1722 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1723 }
1724
1725# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1726 /*
1727 * Try do a direct read using the pbMappingR3 pointer.
1728 */
1729 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1730 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1731 {
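            /* The page is directly readable, so expose its ring-3 mapping as the
               instruction buffer.  The buffer window is capped at 15 bytes past the
               current instruction start, the architectural maximum instruction length. */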
1732 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1733 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1734 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1735 {
1736 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1737 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1738 }
1739 else
1740 {
1741 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1742 Assert(cbInstr < cbMaxRead);
1743 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1744 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1745 }
1746 if (cbDst <= cbMaxRead)
1747 {
1748 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1749 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1750 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1751 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1752 return;
1753 }
1754 pVCpu->iem.s.pbInstrBuf = NULL;
1755
1756 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1757 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1758 }
1759 else
1760# endif
1761#if 0
1762 /*
1763     * If there is no special read handling, we can read a bit more and
1764 * put it in the prefetch buffer.
1765 */
1766 if ( cbDst < cbMaxRead
1767 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1768 {
1769 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1770 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1772 { /* likely */ }
1773 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1776 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1777 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1778                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1779 }
1780 else
1781 {
1782 Log((RT_SUCCESS(rcStrict)
1783 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1784 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1785 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1786 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1787 }
1788 }
1789 /*
1790 * Special read handling, so only read exactly what's needed.
1791 * This is a highly unlikely scenario.
1792 */
1793 else
1794#endif
1795 {
1796 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1797 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1798 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1799 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1801 { /* likely */ }
1802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1803 {
1804 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1805                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1806 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1807 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1808 }
1809 else
1810 {
1811 Log((RT_SUCCESS(rcStrict)
1812 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1813 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1814                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1815 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1816 }
1817 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1818 if (cbToRead == cbDst)
1819 return;
1820 }
1821
1822 /*
1823 * More to read, loop.
1824 */
1825 cbDst -= cbMaxRead;
1826 pvDst = (uint8_t *)pvDst + cbMaxRead;
1827 }
1828#else
1829 RT_NOREF(pvDst, cbDst);
1830 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1831#endif
1832}
1833
1834#else
1835
1836/**
1837 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1838 * exception if it fails.
1839 *
1840 * @returns Strict VBox status code.
1841 * @param pVCpu The cross context virtual CPU structure of the
1842 * calling thread.
1843 * @param   cbMin               The minimum number of bytes relative to offOpcode
1844 * that must be read.
1845 */
1846IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1847{
1848 /*
1849 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1850 *
1851 * First translate CS:rIP to a physical address.
1852 */
1853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1854 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1855 uint32_t cbToTryRead;
1856 RTGCPTR GCPtrNext;
1857 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1858 {
1859 cbToTryRead = PAGE_SIZE;
1860 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1861 if (!IEM_IS_CANONICAL(GCPtrNext))
1862 return iemRaiseGeneralProtectionFault0(pVCpu);
1863 }
1864 else
1865 {
1866 uint32_t GCPtrNext32 = pCtx->eip;
1867 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1868 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1869 if (GCPtrNext32 > pCtx->cs.u32Limit)
1870 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1871 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1872 if (!cbToTryRead) /* overflowed */
1873 {
1874 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1875 cbToTryRead = UINT32_MAX;
1876 /** @todo check out wrapping around the code segment. */
1877 }
1878 if (cbToTryRead < cbMin - cbLeft)
1879 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1880 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1881 }
1882
1883 /* Only read up to the end of the page, and make sure we don't read more
1884 than the opcode buffer can hold. */
1885 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1886 if (cbToTryRead > cbLeftOnPage)
1887 cbToTryRead = cbLeftOnPage;
1888 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1889 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1890/** @todo r=bird: Convert assertion into undefined opcode exception? */
1891 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1892
1893# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1894 /* Allow interpretation of patch manager code blocks since they can for
1895 instance throw #PFs for perfectly good reasons. */
1896 if (pVCpu->iem.s.fInPatchCode)
1897 {
1898 size_t cbRead = 0;
1899 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1900 AssertRCReturn(rc, rc);
1901 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1902 return VINF_SUCCESS;
1903 }
1904# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1905
1906 RTGCPHYS GCPhys;
1907 uint64_t fFlags;
1908 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1909 if (RT_FAILURE(rc))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1912 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1913 }
1914 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1915 {
1916 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1917 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1918 }
1919 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1920 {
1921 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1922 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1923 }
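    /* PGMGstGetPage returns the page frame address; fold the byte offset within the
       page back in to get the exact physical address to read from. */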
1924 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1925 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1926 /** @todo Check reserved bits and such stuff. PGM is better at doing
1927 * that, so do it when implementing the guest virtual address
1928 * TLB... */
1929
1930 /*
1931 * Read the bytes at this address.
1932 *
1933 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1934 * and since PATM should only patch the start of an instruction there
1935 * should be no need to check again here.
1936 */
1937 if (!pVCpu->iem.s.fBypassHandlers)
1938 {
1939 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1940 cbToTryRead, PGMACCESSORIGIN_IEM);
1941 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1942 { /* likely */ }
1943 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1944 {
1945 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1946                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1947 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1948 }
1949 else
1950 {
1951 Log((RT_SUCCESS(rcStrict)
1952 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1953 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1954                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1955 return rcStrict;
1956 }
1957 }
1958 else
1959 {
1960 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1961 if (RT_SUCCESS(rc))
1962 { /* likely */ }
1963 else
1964 {
1965 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1966 return rc;
1967 }
1968 }
1969 pVCpu->iem.s.cbOpcode += cbToTryRead;
1970 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1971
1972 return VINF_SUCCESS;
1973}
1974
1975#endif /* !IEM_WITH_CODE_TLB */
1976#ifndef IEM_WITH_SETJMP
1977
1978/**
1979 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1980 *
1981 * @returns Strict VBox status code.
1982 * @param pVCpu The cross context virtual CPU structure of the
1983 * calling thread.
1984 * @param pb Where to return the opcode byte.
1985 */
1986DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1987{
1988 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1989 if (rcStrict == VINF_SUCCESS)
1990 {
1991 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1992 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1993 pVCpu->iem.s.offOpcode = offOpcode + 1;
1994 }
1995 else
1996 *pb = 0;
1997 return rcStrict;
1998}
1999
2000
2001/**
2002 * Fetches the next opcode byte.
2003 *
2004 * @returns Strict VBox status code.
2005 * @param pVCpu The cross context virtual CPU structure of the
2006 * calling thread.
2007 * @param pu8 Where to return the opcode byte.
2008 */
2009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2010{
2011 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2012 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2013 {
2014 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2015 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2016 return VINF_SUCCESS;
2017 }
2018 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2019}
2020
2021#else /* IEM_WITH_SETJMP */
2022
2023/**
2024 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2025 *
2026 * @returns The opcode byte.
2027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2028 */
2029DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2030{
2031# ifdef IEM_WITH_CODE_TLB
2032 uint8_t u8;
2033 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2034 return u8;
2035# else
2036 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2037 if (rcStrict == VINF_SUCCESS)
2038 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2039 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2040# endif
2041}
2042
2043
2044/**
2045 * Fetches the next opcode byte, longjmp on error.
2046 *
2047 * @returns The opcode byte.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 */
2050DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2051{
2052# ifdef IEM_WITH_CODE_TLB
2053 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2054 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2055 if (RT_LIKELY( pbBuf != NULL
2056 && offBuf < pVCpu->iem.s.cbInstrBuf))
2057 {
2058 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2059 return pbBuf[offBuf];
2060 }
2061# else
2062 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2063 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2064 {
2065 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2066 return pVCpu->iem.s.abOpcode[offOpcode];
2067 }
2068# endif
2069 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2070}
2071
2072#endif /* IEM_WITH_SETJMP */
2073
2074/**
2075 * Fetches the next opcode byte, returns automatically on failure.
2076 *
2077 * @param a_pu8 Where to return the opcode byte.
2078 * @remark Implicitly references pVCpu.
2079 */
2080#ifndef IEM_WITH_SETJMP
2081# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2082 do \
2083 { \
2084 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2085 if (rcStrict2 == VINF_SUCCESS) \
2086 { /* likely */ } \
2087 else \
2088 return rcStrict2; \
2089 } while (0)
2090#else
2091# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2092#endif /* IEM_WITH_SETJMP */
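
/* Illustrative use in a decoder function returning VBOXSTRICTRC (hypothetical sketch,
 * not taken from the decoder tables):
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);   // returns / longjmps from here on fetch failure
 *      ... decode the instruction using bRm ...
 */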
2093
2094
2095#ifndef IEM_WITH_SETJMP
2096/**
2097 * Fetches the next signed byte from the opcode stream.
2098 *
2099 * @returns Strict VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 * @param pi8 Where to return the signed byte.
2102 */
2103DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2104{
2105 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2106}
2107#endif /* !IEM_WITH_SETJMP */
2108
2109
2110/**
2111 * Fetches the next signed byte from the opcode stream, returning automatically
2112 * on failure.
2113 *
2114 * @param a_pi8 Where to return the signed byte.
2115 * @remark Implicitly references pVCpu.
2116 */
2117#ifndef IEM_WITH_SETJMP
2118# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2119 do \
2120 { \
2121 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2122 if (rcStrict2 != VINF_SUCCESS) \
2123 return rcStrict2; \
2124 } while (0)
2125#else /* IEM_WITH_SETJMP */
2126# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2127
2128#endif /* IEM_WITH_SETJMP */
2129
2130#ifndef IEM_WITH_SETJMP
2131
2132/**
2133 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2137 * @param   pu16                Where to return the opcode word.
2138 */
2139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2140{
2141 uint8_t u8;
2142 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2143 if (rcStrict == VINF_SUCCESS)
2144 *pu16 = (int8_t)u8;
2145 return rcStrict;
2146}
2147
2148
2149/**
2150 * Fetches the next signed byte from the opcode stream, extending it to
2151 * unsigned 16-bit.
2152 *
2153 * @returns Strict VBox status code.
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pu16 Where to return the unsigned word.
2156 */
2157DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2158{
2159 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2160 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2161 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2162
2163 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2164 pVCpu->iem.s.offOpcode = offOpcode + 1;
2165 return VINF_SUCCESS;
2166}
2167
2168#endif /* !IEM_WITH_SETJMP */
2169
2170/**
2171 * Fetches the next signed byte from the opcode stream and sign-extends it to a
2172 * word, returning automatically on failure.
2173 *
2174 * @param a_pu16 Where to return the word.
2175 * @remark Implicitly references pVCpu.
2176 */
2177#ifndef IEM_WITH_SETJMP
2178# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2179 do \
2180 { \
2181 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2182 if (rcStrict2 != VINF_SUCCESS) \
2183 return rcStrict2; \
2184 } while (0)
2185#else
2186# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2187#endif
2188
2189#ifndef IEM_WITH_SETJMP
2190
2191/**
2192 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2193 *
2194 * @returns Strict VBox status code.
2195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2196 * @param pu32 Where to return the opcode dword.
2197 */
2198DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2199{
2200 uint8_t u8;
2201 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2202 if (rcStrict == VINF_SUCCESS)
2203 *pu32 = (int8_t)u8;
2204 return rcStrict;
2205}
2206
2207
2208/**
2209 * Fetches the next signed byte from the opcode stream, extending it to
2210 * unsigned 32-bit.
2211 *
2212 * @returns Strict VBox status code.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 * @param pu32 Where to return the unsigned dword.
2215 */
2216DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2217{
2218 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2219 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2220 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2221
2222 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2223 pVCpu->iem.s.offOpcode = offOpcode + 1;
2224 return VINF_SUCCESS;
2225}
2226
2227#endif /* !IEM_WITH_SETJMP */
2228
2229/**
2230 * Fetches the next signed byte from the opcode stream and sign-extends it to a
2231 * double word, returning automatically on failure.
2232 *
2233 * @param   a_pu32              Where to return the double word.
2234 * @remark Implicitly references pVCpu.
2235 */
2236#ifndef IEM_WITH_SETJMP
2237# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2238 do \
2239 { \
2240 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2241 if (rcStrict2 != VINF_SUCCESS) \
2242 return rcStrict2; \
2243 } while (0)
2244#else
2245# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2246#endif
2247
2248#ifndef IEM_WITH_SETJMP
2249
2250/**
2251 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2252 *
2253 * @returns Strict VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2255 * @param pu64 Where to return the opcode qword.
2256 */
2257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2258{
2259 uint8_t u8;
2260 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2261 if (rcStrict == VINF_SUCCESS)
2262 *pu64 = (int8_t)u8;
2263 return rcStrict;
2264}
2265
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, extending it to
2269 * unsigned 64-bit.
2270 *
2271 * @returns Strict VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2273 * @param pu64 Where to return the unsigned qword.
2274 */
2275DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2276{
2277 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2278 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2279 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2280
2281 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2282 pVCpu->iem.s.offOpcode = offOpcode + 1;
2283 return VINF_SUCCESS;
2284}
2285
2286#endif /* !IEM_WITH_SETJMP */
2287
2288
2289/**
2290 * Fetches the next signed byte from the opcode stream and sign-extends it to a
2291 * quad word, returning automatically on failure.
2292 *
2293 * @param   a_pu64              Where to return the quad word.
2294 * @remark Implicitly references pVCpu.
2295 */
2296#ifndef IEM_WITH_SETJMP
2297# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2298 do \
2299 { \
2300 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2301 if (rcStrict2 != VINF_SUCCESS) \
2302 return rcStrict2; \
2303 } while (0)
2304#else
2305# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2306#endif
2307
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu16 Where to return the opcode word.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2319{
2320 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2321 if (rcStrict == VINF_SUCCESS)
2322 {
2323 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2324# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2325 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2326# else
2327 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2328# endif
2329 pVCpu->iem.s.offOpcode = offOpcode + 2;
2330 }
2331 else
2332 *pu16 = 0;
2333 return rcStrict;
2334}
2335
2336
2337/**
2338 * Fetches the next opcode word.
2339 *
2340 * @returns Strict VBox status code.
2341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2342 * @param pu16 Where to return the opcode word.
2343 */
2344DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2345{
2346 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2347 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2348 {
2349 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2350# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2351 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2352# else
2353 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2354# endif
2355 return VINF_SUCCESS;
2356 }
2357 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2358}
2359
2360#else /* IEM_WITH_SETJMP */
2361
2362/**
2363 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2364 *
2365 * @returns The opcode word.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 */
2368DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2369{
2370# ifdef IEM_WITH_CODE_TLB
2371 uint16_t u16;
2372 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2373 return u16;
2374# else
2375 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2376 if (rcStrict == VINF_SUCCESS)
2377 {
2378 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2379 pVCpu->iem.s.offOpcode += 2;
2380# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2381 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2382# else
2383 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2384# endif
2385 }
2386 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2387# endif
2388}
2389
2390
2391/**
2392 * Fetches the next opcode word, longjmp on error.
2393 *
2394 * @returns The opcode word.
2395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2396 */
2397DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2398{
2399# ifdef IEM_WITH_CODE_TLB
2400 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2401 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2402 if (RT_LIKELY( pbBuf != NULL
2403 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2404 {
2405 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2406# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2407 return *(uint16_t const *)&pbBuf[offBuf];
2408# else
2409 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2410# endif
2411 }
2412# else
2413 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2414 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2415 {
2416 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2419# else
2420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2421# endif
2422 }
2423# endif
2424 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2425}
2426
2427#endif /* IEM_WITH_SETJMP */
2428
2429
2430/**
2431 * Fetches the next opcode word, returns automatically on failure.
2432 *
2433 * @param a_pu16 Where to return the opcode word.
2434 * @remark Implicitly references pVCpu.
2435 */
2436#ifndef IEM_WITH_SETJMP
2437# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2438 do \
2439 { \
2440 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2441 if (rcStrict2 != VINF_SUCCESS) \
2442 return rcStrict2; \
2443 } while (0)
2444#else
2445# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2446#endif
2447
2448#ifndef IEM_WITH_SETJMP
2449
2450/**
2451 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2452 *
2453 * @returns Strict VBox status code.
2454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2455 * @param pu32 Where to return the opcode double word.
2456 */
2457DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2458{
2459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2460 if (rcStrict == VINF_SUCCESS)
2461 {
2462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2463 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2464 pVCpu->iem.s.offOpcode = offOpcode + 2;
2465 }
2466 else
2467 *pu32 = 0;
2468 return rcStrict;
2469}
2470
2471
2472/**
2473 * Fetches the next opcode word, zero extending it to a double word.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2477 * @param pu32 Where to return the opcode double word.
2478 */
2479DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2480{
2481 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2482 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2483 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2484
2485 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2486 pVCpu->iem.s.offOpcode = offOpcode + 2;
2487 return VINF_SUCCESS;
2488}
2489
2490#endif /* !IEM_WITH_SETJMP */
2491
2492
2493/**
2494 * Fetches the next opcode word and zero extends it to a double word, returns
2495 * automatically on failure.
2496 *
2497 * @param a_pu32 Where to return the opcode double word.
2498 * @remark Implicitly references pVCpu.
2499 */
2500#ifndef IEM_WITH_SETJMP
2501# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2502 do \
2503 { \
2504 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2505 if (rcStrict2 != VINF_SUCCESS) \
2506 return rcStrict2; \
2507 } while (0)
2508#else
2509# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2510#endif
2511
2512#ifndef IEM_WITH_SETJMP
2513
2514/**
2515 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2516 *
2517 * @returns Strict VBox status code.
2518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2519 * @param pu64 Where to return the opcode quad word.
2520 */
2521DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2522{
2523 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2524 if (rcStrict == VINF_SUCCESS)
2525 {
2526 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2527 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528 pVCpu->iem.s.offOpcode = offOpcode + 2;
2529 }
2530 else
2531 *pu64 = 0;
2532 return rcStrict;
2533}
2534
2535
2536/**
2537 * Fetches the next opcode word, zero extending it to a quad word.
2538 *
2539 * @returns Strict VBox status code.
2540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2541 * @param pu64 Where to return the opcode quad word.
2542 */
2543DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2544{
2545 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2546 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2547 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2548
2549 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2550 pVCpu->iem.s.offOpcode = offOpcode + 2;
2551 return VINF_SUCCESS;
2552}
2553
2554#endif /* !IEM_WITH_SETJMP */
2555
2556/**
2557 * Fetches the next opcode word and zero extends it to a quad word, returns
2558 * automatically on failure.
2559 *
2560 * @param a_pu64 Where to return the opcode quad word.
2561 * @remark Implicitly references pVCpu.
2562 */
2563#ifndef IEM_WITH_SETJMP
2564# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2565 do \
2566 { \
2567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2568 if (rcStrict2 != VINF_SUCCESS) \
2569 return rcStrict2; \
2570 } while (0)
2571#else
2572# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2573#endif
2574
2575
2576#ifndef IEM_WITH_SETJMP
2577/**
2578 * Fetches the next signed word from the opcode stream.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pi16 Where to return the signed word.
2583 */
2584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2585{
2586 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2587}
2588#endif /* !IEM_WITH_SETJMP */
2589
2590
2591/**
2592 * Fetches the next signed word from the opcode stream, returning automatically
2593 * on failure.
2594 *
2595 * @param a_pi16 Where to return the signed word.
2596 * @remark Implicitly references pVCpu.
2597 */
2598#ifndef IEM_WITH_SETJMP
2599# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2600 do \
2601 { \
2602 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2603 if (rcStrict2 != VINF_SUCCESS) \
2604 return rcStrict2; \
2605 } while (0)
2606#else
2607# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2608#endif
2609
2610#ifndef IEM_WITH_SETJMP
2611
2612/**
2613 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2614 *
2615 * @returns Strict VBox status code.
2616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2617 * @param pu32 Where to return the opcode dword.
2618 */
2619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2620{
2621 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2622 if (rcStrict == VINF_SUCCESS)
2623 {
2624 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 pVCpu->iem.s.offOpcode = offOpcode + 4;
2634 }
2635 else
2636 *pu32 = 0;
2637 return rcStrict;
2638}
2639
2640
2641/**
2642 * Fetches the next opcode dword.
2643 *
2644 * @returns Strict VBox status code.
2645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2646 * @param pu32 Where to return the opcode double word.
2647 */
2648DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2649{
2650 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2651 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2652 {
2653 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2656# else
2657 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2658 pVCpu->iem.s.abOpcode[offOpcode + 1],
2659 pVCpu->iem.s.abOpcode[offOpcode + 2],
2660 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2661# endif
2662 return VINF_SUCCESS;
2663 }
2664 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2665}
2666
2667#else  /* IEM_WITH_SETJMP */
2668
2669/**
2670 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2671 *
2672 * @returns The opcode dword.
2673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2674 */
2675DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2676{
2677# ifdef IEM_WITH_CODE_TLB
2678 uint32_t u32;
2679 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2680 return u32;
2681# else
2682 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2683 if (rcStrict == VINF_SUCCESS)
2684 {
2685 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2686 pVCpu->iem.s.offOpcode = offOpcode + 4;
2687# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2688 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2689# else
2690 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2691 pVCpu->iem.s.abOpcode[offOpcode + 1],
2692 pVCpu->iem.s.abOpcode[offOpcode + 2],
2693 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2694# endif
2695 }
2696 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2697# endif
2698}
2699
2700
2701/**
2702 * Fetches the next opcode dword, longjmp on error.
2703 *
2704 * @returns The opcode dword.
2705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2706 */
2707DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2708{
2709# ifdef IEM_WITH_CODE_TLB
2710 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2711 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2712 if (RT_LIKELY( pbBuf != NULL
2713 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2714 {
2715 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2716# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2717 return *(uint32_t const *)&pbBuf[offBuf];
2718# else
2719 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2720 pbBuf[offBuf + 1],
2721 pbBuf[offBuf + 2],
2722 pbBuf[offBuf + 3]);
2723# endif
2724 }
2725# else
2726 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2727 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2728 {
2729 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2730# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2731 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2732# else
2733 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2734 pVCpu->iem.s.abOpcode[offOpcode + 1],
2735 pVCpu->iem.s.abOpcode[offOpcode + 2],
2736 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2737# endif
2738 }
2739# endif
2740 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2741}
2742
2743#endif /* IEM_WITH_SETJMP */
2744
2745
2746/**
2747 * Fetches the next opcode dword, returns automatically on failure.
2748 *
2749 * @param a_pu32 Where to return the opcode dword.
2750 * @remark Implicitly references pVCpu.
2751 */
2752#ifndef IEM_WITH_SETJMP
2753# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2754 do \
2755 { \
2756 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2757 if (rcStrict2 != VINF_SUCCESS) \
2758 return rcStrict2; \
2759 } while (0)
2760#else
2761# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2762#endif
2763
2764#ifndef IEM_WITH_SETJMP
2765
2766/**
2767 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2768 *
2769 * @returns Strict VBox status code.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 * @param   pu64                Where to return the opcode qword.
2772 */
2773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2774{
2775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2776 if (rcStrict == VINF_SUCCESS)
2777 {
2778 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2779 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2780 pVCpu->iem.s.abOpcode[offOpcode + 1],
2781 pVCpu->iem.s.abOpcode[offOpcode + 2],
2782 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2783 pVCpu->iem.s.offOpcode = offOpcode + 4;
2784 }
2785 else
2786 *pu64 = 0;
2787 return rcStrict;
2788}
2789
2790
2791/**
2792 * Fetches the next opcode dword, zero extending it to a quad word.
2793 *
2794 * @returns Strict VBox status code.
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 * @param pu64 Where to return the opcode quad word.
2797 */
2798DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2799{
2800 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2801 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2802 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2803
2804 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2805 pVCpu->iem.s.abOpcode[offOpcode + 1],
2806 pVCpu->iem.s.abOpcode[offOpcode + 2],
2807 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2808 pVCpu->iem.s.offOpcode = offOpcode + 4;
2809 return VINF_SUCCESS;
2810}
2811
2812#endif /* !IEM_WITH_SETJMP */
2813
2814
2815/**
2816 * Fetches the next opcode dword and zero extends it to a quad word, returns
2817 * automatically on failure.
2818 *
2819 * @param a_pu64 Where to return the opcode quad word.
2820 * @remark Implicitly references pVCpu.
2821 */
2822#ifndef IEM_WITH_SETJMP
2823# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2824 do \
2825 { \
2826 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2827 if (rcStrict2 != VINF_SUCCESS) \
2828 return rcStrict2; \
2829 } while (0)
2830#else
2831# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2832#endif
2833
2834
2835#ifndef IEM_WITH_SETJMP
2836/**
2837 * Fetches the next signed double word from the opcode stream.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 * @param pi32 Where to return the signed double word.
2842 */
2843DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2844{
2845 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2846}
2847#endif
2848
2849/**
2850 * Fetches the next signed double word from the opcode stream, returning
2851 * automatically on failure.
2852 *
2853 * @param a_pi32 Where to return the signed double word.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param pu64 Where to return the opcode qword.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, sign extending it into a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2907
2908 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 *pu64 = i32;
2913 pVCpu->iem.s.offOpcode = offOpcode + 4;
2914 return VINF_SUCCESS;
2915}
2916
2917#endif /* !IEM_WITH_SETJMP */
2918
2919
2920/**
2921 * Fetches the next opcode double word and sign extends it to a quad word,
2922 * returns automatically on failure.
2923 *
2924 * @param a_pu64 Where to return the opcode quad word.
2925 * @remark Implicitly references pVCpu.
2926 */
2927#ifndef IEM_WITH_SETJMP
2928# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2929 do \
2930 { \
2931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2932 if (rcStrict2 != VINF_SUCCESS) \
2933 return rcStrict2; \
2934 } while (0)
2935#else
2936# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2937#endif
2938
2939#ifndef IEM_WITH_SETJMP
2940
2941/**
2942 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2943 *
2944 * @returns Strict VBox status code.
2945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2946 * @param pu64 Where to return the opcode qword.
2947 */
2948DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2949{
2950 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2951 if (rcStrict == VINF_SUCCESS)
2952 {
2953 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2954# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2955 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2956# else
2957 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2958 pVCpu->iem.s.abOpcode[offOpcode + 1],
2959 pVCpu->iem.s.abOpcode[offOpcode + 2],
2960 pVCpu->iem.s.abOpcode[offOpcode + 3],
2961 pVCpu->iem.s.abOpcode[offOpcode + 4],
2962 pVCpu->iem.s.abOpcode[offOpcode + 5],
2963 pVCpu->iem.s.abOpcode[offOpcode + 6],
2964 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2965# endif
2966 pVCpu->iem.s.offOpcode = offOpcode + 8;
2967 }
2968 else
2969 *pu64 = 0;
2970 return rcStrict;
2971}
2972
2973
2974/**
2975 * Fetches the next opcode qword.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2984 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2985 {
2986# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2987 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2988# else
2989 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2990 pVCpu->iem.s.abOpcode[offOpcode + 1],
2991 pVCpu->iem.s.abOpcode[offOpcode + 2],
2992 pVCpu->iem.s.abOpcode[offOpcode + 3],
2993 pVCpu->iem.s.abOpcode[offOpcode + 4],
2994 pVCpu->iem.s.abOpcode[offOpcode + 5],
2995 pVCpu->iem.s.abOpcode[offOpcode + 6],
2996 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2997# endif
2998 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2999 return VINF_SUCCESS;
3000 }
3001 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3002}
3003
3004#else /* IEM_WITH_SETJMP */
3005
3006/**
3007 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3008 *
3009 * @returns The opcode qword.
3010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3013{
3014# ifdef IEM_WITH_CODE_TLB
3015 uint64_t u64;
3016 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3017 return u64;
3018# else
3019 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3020 if (rcStrict == VINF_SUCCESS)
3021 {
3022 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3023 pVCpu->iem.s.offOpcode = offOpcode + 8;
3024# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3025 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3026# else
3027 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3028 pVCpu->iem.s.abOpcode[offOpcode + 1],
3029 pVCpu->iem.s.abOpcode[offOpcode + 2],
3030 pVCpu->iem.s.abOpcode[offOpcode + 3],
3031 pVCpu->iem.s.abOpcode[offOpcode + 4],
3032 pVCpu->iem.s.abOpcode[offOpcode + 5],
3033 pVCpu->iem.s.abOpcode[offOpcode + 6],
3034 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3035# endif
3036 }
3037 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3038# endif
3039}
3040
3041
3042/**
3043 * Fetches the next opcode qword, longjmp on error.
3044 *
3045 * @returns The opcode qword.
3046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3047 */
3048DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3049{
3050# ifdef IEM_WITH_CODE_TLB
3051 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3052 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3053 if (RT_LIKELY( pbBuf != NULL
3054 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3055 {
3056 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 return *(uint64_t const *)&pbBuf[offBuf];
3059# else
3060 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3061 pbBuf[offBuf + 1],
3062 pbBuf[offBuf + 2],
3063 pbBuf[offBuf + 3],
3064 pbBuf[offBuf + 4],
3065 pbBuf[offBuf + 5],
3066 pbBuf[offBuf + 6],
3067 pbBuf[offBuf + 7]);
3068# endif
3069 }
3070# else
3071 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3072 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3073 {
3074 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3075# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3076 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3077# else
3078 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3079 pVCpu->iem.s.abOpcode[offOpcode + 1],
3080 pVCpu->iem.s.abOpcode[offOpcode + 2],
3081 pVCpu->iem.s.abOpcode[offOpcode + 3],
3082 pVCpu->iem.s.abOpcode[offOpcode + 4],
3083 pVCpu->iem.s.abOpcode[offOpcode + 5],
3084 pVCpu->iem.s.abOpcode[offOpcode + 6],
3085 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3086# endif
3087 }
3088# endif
3089 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3090}
3091
3092#endif /* IEM_WITH_SETJMP */
3093
3094/**
3095 * Fetches the next opcode quad word, returns automatically on failure.
3096 *
3097 * @param a_pu64 Where to return the opcode quad word.
3098 * @remark Implicitly references pVCpu.
3099 */
3100#ifndef IEM_WITH_SETJMP
3101# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3102 do \
3103 { \
3104 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3105 if (rcStrict2 != VINF_SUCCESS) \
3106 return rcStrict2; \
3107 } while (0)
3108#else
3109# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3110#endif
3111
3112
3113/** @name Misc Worker Functions.
3114 * @{
3115 */
3116
3117/* Currently used only with nested hw.virt. */
3118#ifdef VBOX_WITH_NESTED_HWVIRT
3119/**
3120 * Initiates a CPU shutdown sequence.
3121 *
3122 * @returns Strict VBox status code.
3123 * @param pVCpu The cross context virtual CPU structure of the
3124 * calling thread.
3125 */
3126IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3127{
3128 RT_NOREF_PV(pVCpu);
3129 /** @todo Probably need a separate error code and handling for this to
3130 * distinguish it from the regular triple fault. */
3131 return VINF_EM_TRIPLE_FAULT;
3132}
3133#endif
3134
3135/**
3136 * Validates a new SS segment.
3137 *
3138 * @returns VBox strict status code.
3139 * @param pVCpu The cross context virtual CPU structure of the
3140 * calling thread.
3141 * @param pCtx The CPU context.
3142 * @param   NewSS               The new SS selector.
3143 * @param uCpl The CPL to load the stack for.
3144 * @param pDesc Where to return the descriptor.
3145 */
3146IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3147{
3148 NOREF(pCtx);
3149
3150 /* Null selectors are not allowed (we're not called for dispatching
3151 interrupts with SS=0 in long mode). */
3152 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3153 {
3154 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3155 return iemRaiseTaskSwitchFault0(pVCpu);
3156 }
3157
3158 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3159 if ((NewSS & X86_SEL_RPL) != uCpl)
3160 {
3161 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3162 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3163 }
3164
3165 /*
3166 * Read the descriptor.
3167 */
3168 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3169 if (rcStrict != VINF_SUCCESS)
3170 return rcStrict;
3171
3172 /*
3173 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3174 */
3175 if (!pDesc->Legacy.Gen.u1DescType)
3176 {
3177 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3178 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3179 }
3180
3181 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3182 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3183 {
3184 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3185 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3186 }
3187 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3188 {
3189 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3190 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3191 }
3192
3193 /* Is it there? */
3194 /** @todo testcase: Is this checked before the canonical / limit check below? */
3195 if (!pDesc->Legacy.Gen.u1Present)
3196 {
3197 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3198 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3199 }
3200
3201 return VINF_SUCCESS;
3202}
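/**
 * Usage sketch (mirrors the privilege-change path in iemRaiseXcptOrIntInProtMode
 * further down; uNewCpl stands for the target CPL computed by the caller): the
 * new SS:ESP is read from the TSS first, then the SS selector and descriptor are
 * validated before any stack writes are attempted.
 * @code
 *      RTSEL        NewSS;
 *      uint32_t     uNewEsp;
 *      VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          IEMSELDESC DescSS;
 *          rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *      }
 * @endcode
 */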
3203
3204
3205/**
3206 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3207 * not.
3208 *
3209 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3210 * @param a_pCtx The CPU context.
3211 */
3212#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3213# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3214 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3215 ? (a_pCtx)->eflags.u \
3216 : CPUMRawGetEFlags(a_pVCpu) )
3217#else
3218# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3219 ( (a_pCtx)->eflags.u )
3220#endif
3221
3222/**
3223 * Updates the EFLAGS in the correct manner wrt. PATM.
3224 *
3225 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3226 * @param a_pCtx The CPU context.
3227 * @param a_fEfl The new EFLAGS.
3228 */
3229#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3230# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3231 do { \
3232 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3233 (a_pCtx)->eflags.u = (a_fEfl); \
3234 else \
3235 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3236 } while (0)
3237#else
3238# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3239 do { \
3240 (a_pCtx)->eflags.u = (a_fEfl); \
3241 } while (0)
3242#endif
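/**
 * Usage sketch (mirrors the real-mode exception path further down): EFLAGS is
 * always read and written through these wrappers so that the raw-mode (PATM)
 * copy stays coherent with the context copy.
 * @code
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;                    // e.g. mask interrupts for the handler
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 * @endcode
 */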
3243
3244
3245/** @} */
3246
3247/** @name Raising Exceptions.
3248 *
3249 * @{
3250 */
3251
3252/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3253 * @{ */
3254/** CPU exception. */
3255#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3256/** External interrupt (from PIC, APIC, whatever). */
3257#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3258/** Software interrupt (int or into, not bound).
3259 * Returns to the following instruction */
3260#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3261/** Takes an error code. */
3262#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3263/** Takes a CR2. */
3264#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3265/** Generated by the breakpoint instruction. */
3266#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3267/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3268#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3269/** @} */
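/**
 * Combination sketch (illustrative, not taken verbatim from a caller): a page
 * fault both takes an error code and latches CR2, so its raiser would combine
 * @code
 *      fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2;   // #PF
 * @endcode
 * together with uErr and uCr2, whereas an INT3 would plausibly use
 * IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR and neither value.
 */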
3270
3271
3272/**
3273 * Loads the specified stack far pointer from the TSS.
3274 *
3275 * @returns VBox strict status code.
3276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3277 * @param pCtx The CPU context.
3278 * @param uCpl The CPL to load the stack for.
3279 * @param pSelSS Where to return the new stack segment.
3280 * @param puEsp Where to return the new stack pointer.
3281 */
3282IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3283 PRTSEL pSelSS, uint32_t *puEsp)
3284{
3285 VBOXSTRICTRC rcStrict;
3286 Assert(uCpl < 4);
3287
3288 switch (pCtx->tr.Attr.n.u4Type)
3289 {
3290 /*
3291 * 16-bit TSS (X86TSS16).
3292 */
3293 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3294 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3295 {
3296 uint32_t off = uCpl * 4 + 2;
3297 if (off + 4 <= pCtx->tr.u32Limit)
3298 {
3299 /** @todo check actual access pattern here. */
3300 uint32_t u32Tmp = 0; /* gcc maybe... */
3301 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3302 if (rcStrict == VINF_SUCCESS)
3303 {
3304 *puEsp = RT_LOWORD(u32Tmp);
3305 *pSelSS = RT_HIWORD(u32Tmp);
3306 return VINF_SUCCESS;
3307 }
3308 }
3309 else
3310 {
3311 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3312 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3313 }
3314 break;
3315 }
3316
3317 /*
3318 * 32-bit TSS (X86TSS32).
3319 */
3320 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3321 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3322 {
3323 uint32_t off = uCpl * 8 + 4;
3324 if (off + 7 <= pCtx->tr.u32Limit)
3325 {
3326                /** @todo check actual access pattern here. */
3327 uint64_t u64Tmp;
3328 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3329 if (rcStrict == VINF_SUCCESS)
3330 {
3331 *puEsp = u64Tmp & UINT32_MAX;
3332 *pSelSS = (RTSEL)(u64Tmp >> 32);
3333 return VINF_SUCCESS;
3334 }
3335 }
3336 else
3337 {
3338                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3339 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3340 }
3341 break;
3342 }
3343
3344 default:
3345 AssertFailed();
3346 rcStrict = VERR_IEM_IPE_4;
3347 break;
3348 }
3349
3350 *puEsp = 0; /* make gcc happy */
3351 *pSelSS = 0; /* make gcc happy */
3352 return rcStrict;
3353}
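/**
 * Worked example for the 32-bit TSS case above: the ring-N stack fields start
 * at ESP0 (offset 4) and repeat every 8 bytes, so for uCpl=1 the code reads
 * 8 bytes at off = 1 * 8 + 4 = 12, yielding ESP1 in the low dword and SS1 in
 * the word that follows (u64Tmp >> 32, truncated to 16 bits by the RTSEL cast).
 */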
3354
3355
3356/**
3357 * Loads the specified stack pointer from the 64-bit TSS.
3358 *
3359 * @returns VBox strict status code.
3360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3361 * @param pCtx The CPU context.
3362 * @param uCpl The CPL to load the stack for.
3363 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3364 * @param puRsp Where to return the new stack pointer.
3365 */
3366IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3367{
3368 Assert(uCpl < 4);
3369 Assert(uIst < 8);
3370 *puRsp = 0; /* make gcc happy */
3371
3372 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3373
3374 uint32_t off;
3375 if (uIst)
3376 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3377 else
3378 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3379 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3380 {
3381 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3382 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3383 }
3384
3385 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3386}
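/**
 * Worked example for the 64-bit TSS lookup above: with uIst=0 and uCpl=2 the
 * offset is 2 * 8 + RT_OFFSETOF(X86TSS64, rsp0), i.e. the RSP2 field; with
 * uIst=3 it is (3 - 1) * 8 + RT_OFFSETOF(X86TSS64, ist1), i.e. the IST3 slot,
 * regardless of CPL.
 */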
3387
3388
3389/**
3390 * Adjust the CPU state according to the exception being raised.
3391 *
3392 * @param pCtx The CPU context.
3393 * @param u8Vector The exception that has been raised.
3394 */
3395DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3396{
3397 switch (u8Vector)
3398 {
3399 case X86_XCPT_DB:
3400 pCtx->dr[7] &= ~X86_DR7_GD;
3401 break;
3402 /** @todo Read the AMD and Intel exception reference... */
3403 }
3404}
3405
3406
3407/**
3408 * Implements exceptions and interrupts for real mode.
3409 *
3410 * @returns VBox strict status code.
3411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3412 * @param pCtx The CPU context.
3413 * @param cbInstr The number of bytes to offset rIP by in the return
3414 * address.
3415 * @param u8Vector The interrupt / exception vector number.
3416 * @param fFlags The flags.
3417 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3418 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3419 */
3420IEM_STATIC VBOXSTRICTRC
3421iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3422 PCPUMCTX pCtx,
3423 uint8_t cbInstr,
3424 uint8_t u8Vector,
3425 uint32_t fFlags,
3426 uint16_t uErr,
3427 uint64_t uCr2)
3428{
3429 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3430 NOREF(uErr); NOREF(uCr2);
3431
3432 /*
3433 * Read the IDT entry.
3434 */
3435 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3436 {
3437 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3438 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3439 }
3440 RTFAR16 Idte;
3441 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3442 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3443 return rcStrict;
3444
3445 /*
3446 * Push the stack frame.
3447 */
3448 uint16_t *pu16Frame;
3449 uint64_t uNewRsp;
3450 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453
3454 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3455#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3456 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3457 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3458 fEfl |= UINT16_C(0xf000);
3459#endif
3460 pu16Frame[2] = (uint16_t)fEfl;
3461 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3462 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3463 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3464 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3465 return rcStrict;
3466
3467 /*
3468 * Load the vector address into cs:ip and make exception specific state
3469 * adjustments.
3470 */
3471 pCtx->cs.Sel = Idte.sel;
3472 pCtx->cs.ValidSel = Idte.sel;
3473 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3474 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3475 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3476 pCtx->rip = Idte.off;
3477 fEfl &= ~X86_EFL_IF;
3478 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3479
3480 /** @todo do we actually do this in real mode? */
3481 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3482 iemRaiseXcptAdjustState(pCtx, u8Vector);
3483
3484 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3485}
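/**
 * Worked example of the real-mode path above for INT 21h: the 4-byte IVT entry
 * is fetched at idtr.pIdt + 4 * 0x21, a 6-byte FLAGS/CS/IP frame is pushed
 * (with IP pointing past the INT instruction for software interrupts), and
 * execution resumes at Idte.sel:Idte.off with a code segment base of
 * Idte.sel << 4 and IF cleared.
 */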
3486
3487
3488/**
3489 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3490 *
3491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3492 * @param pSReg Pointer to the segment register.
3493 */
3494IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3495{
3496 pSReg->Sel = 0;
3497 pSReg->ValidSel = 0;
3498 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3499 {
3500        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3501 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3502 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3503 }
3504 else
3505 {
3506 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3507 /** @todo check this on AMD-V */
3508 pSReg->u64Base = 0;
3509 pSReg->u32Limit = 0;
3510 }
3511}
3512
3513
3514/**
3515 * Loads a segment selector during a task switch in V8086 mode.
3516 *
3517 * @param pSReg Pointer to the segment register.
3518 * @param uSel The selector value to load.
3519 */
3520IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3521{
3522 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3523 pSReg->Sel = uSel;
3524 pSReg->ValidSel = uSel;
3525 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3526 pSReg->u64Base = uSel << 4;
3527 pSReg->u32Limit = 0xffff;
3528 pSReg->Attr.u = 0xf3;
3529}
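/**
 * Worked example for the V8086 loader above: uSel=0xB800 yields a base of
 * 0xB8000 (uSel << 4), a 64KB limit (0xffff) and attribute byte 0xf3, i.e. a
 * present, DPL=3, accessed read/write data segment.
 */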
3530
3531
3532/**
3533 * Loads a NULL data selector into a selector register, both the hidden and
3534 * visible parts, in protected mode.
3535 *
3536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3537 * @param pSReg Pointer to the segment register.
3538 * @param uRpl The RPL.
3539 */
3540IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3541{
3542    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3543 * data selector in protected mode. */
3544 pSReg->Sel = uRpl;
3545 pSReg->ValidSel = uRpl;
3546 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3547 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3548 {
3549        /* VT-x (Intel 3960x) has been observed doing something like this. */
3550 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3551 pSReg->u32Limit = UINT32_MAX;
3552 pSReg->u64Base = 0;
3553 }
3554 else
3555 {
3556 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3557 pSReg->u32Limit = 0;
3558 pSReg->u64Base = 0;
3559 }
3560}
3561
3562
3563/**
3564 * Loads a segment selector during a task switch in protected mode.
3565 *
3566 * In this task switch scenario, we would throw \#TS exceptions rather than
3567 * \#GPs.
3568 *
3569 * @returns VBox strict status code.
3570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3571 * @param pSReg Pointer to the segment register.
3572 * @param uSel The new selector value.
3573 *
3574 * @remarks This does _not_ handle CS or SS.
3575 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3576 */
3577IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3578{
3579 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3580
3581 /* Null data selector. */
3582 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3583 {
3584 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3585 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3586 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3587 return VINF_SUCCESS;
3588 }
3589
3590 /* Fetch the descriptor. */
3591 IEMSELDESC Desc;
3592 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3593 if (rcStrict != VINF_SUCCESS)
3594 {
3595 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3596 VBOXSTRICTRC_VAL(rcStrict)));
3597 return rcStrict;
3598 }
3599
3600 /* Must be a data segment or readable code segment. */
3601 if ( !Desc.Legacy.Gen.u1DescType
3602 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3603 {
3604 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3605 Desc.Legacy.Gen.u4Type));
3606 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3607 }
3608
3609 /* Check privileges for data segments and non-conforming code segments. */
3610 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3611 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3612 {
3613 /* The RPL and the new CPL must be less than or equal to the DPL. */
3614 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3615 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3616 {
3617 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3618 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3619 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3620 }
3621 }
3622
3623 /* Is it there? */
3624 if (!Desc.Legacy.Gen.u1Present)
3625 {
3626 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3627 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3628 }
3629
3630 /* The base and limit. */
3631 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3632 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3633
3634 /*
3635 * Ok, everything checked out fine. Now set the accessed bit before
3636 * committing the result into the registers.
3637 */
3638 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3639 {
3640 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3641 if (rcStrict != VINF_SUCCESS)
3642 return rcStrict;
3643 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3644 }
3645
3646 /* Commit */
3647 pSReg->Sel = uSel;
3648 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3649 pSReg->u32Limit = cbLimit;
3650 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3651 pSReg->ValidSel = uSel;
3652 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3653 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3654 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3655
3656 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3657 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3658 return VINF_SUCCESS;
3659}
3660
3661
3662/**
3663 * Performs a task switch.
3664 *
3665 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3666 * caller is responsible for performing the necessary checks (like DPL, TSS
3667 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3668 * reference for JMP, CALL, IRET.
3669 *
3670 * If the task switch is due to a software interrupt or hardware exception,
3671 * the caller is responsible for validating the TSS selector and descriptor. See
3672 * Intel Instruction reference for INT n.
3673 *
3674 * @returns VBox strict status code.
3675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3676 * @param pCtx The CPU context.
3677 * @param enmTaskSwitch What caused this task switch.
3678 * @param uNextEip The EIP effective after the task switch.
3679 * @param fFlags The flags.
3680 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3681 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3682 * @param SelTSS The TSS selector of the new task.
3683 * @param pNewDescTSS Pointer to the new TSS descriptor.
3684 */
3685IEM_STATIC VBOXSTRICTRC
3686iemTaskSwitch(PVMCPU pVCpu,
3687 PCPUMCTX pCtx,
3688 IEMTASKSWITCH enmTaskSwitch,
3689 uint32_t uNextEip,
3690 uint32_t fFlags,
3691 uint16_t uErr,
3692 uint64_t uCr2,
3693 RTSEL SelTSS,
3694 PIEMSELDESC pNewDescTSS)
3695{
3696 Assert(!IEM_IS_REAL_MODE(pVCpu));
3697 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3698
3699 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3700 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3701 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3702 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3703 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3704
3705 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3706 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3707
3708 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3709 fIsNewTSS386, pCtx->eip, uNextEip));
3710
3711 /* Update CR2 in case it's a page-fault. */
3712 /** @todo This should probably be done much earlier in IEM/PGM. See
3713 * @bugref{5653#c49}. */
3714 if (fFlags & IEM_XCPT_FLAGS_CR2)
3715 pCtx->cr2 = uCr2;
3716
3717 /*
3718 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3719 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3720 */
3721 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3722 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3723 if (uNewTSSLimit < uNewTSSLimitMin)
3724 {
3725 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3726 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3727 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3728 }
3729
3730 /*
3731     * Check the current TSS limit. The last bytes written to the current TSS during the
3732     * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3733 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3734 *
3735     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3736 * end up with smaller than "legal" TSS limits.
3737 */
3738 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3739 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3740 if (uCurTSSLimit < uCurTSSLimitMin)
3741 {
3742 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3743 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3744 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3745 }
3746
3747 /*
3748 * Verify that the new TSS can be accessed and map it. Map only the required contents
3749 * and not the entire TSS.
3750 */
3751 void *pvNewTSS;
3752 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3753 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3754 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3755 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3756 * not perform correct translation if this happens. See Intel spec. 7.2.1
3757 * "Task-State Segment" */
3758 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3759 if (rcStrict != VINF_SUCCESS)
3760 {
3761 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3762 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3763 return rcStrict;
3764 }
3765
3766 /*
3767 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3768 */
3769 uint32_t u32EFlags = pCtx->eflags.u32;
3770 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3771 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3772 {
3773 PX86DESC pDescCurTSS;
3774 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3775 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3776 if (rcStrict != VINF_SUCCESS)
3777 {
3778            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3779 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3780 return rcStrict;
3781 }
3782
3783 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3784 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3785 if (rcStrict != VINF_SUCCESS)
3786 {
3787            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3788 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3789 return rcStrict;
3790 }
3791
3792 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3793 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3794 {
3795 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3796 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3797 u32EFlags &= ~X86_EFL_NT;
3798 }
3799 }
3800
3801 /*
3802 * Save the CPU state into the current TSS.
3803 */
3804 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3805 if (GCPtrNewTSS == GCPtrCurTSS)
3806 {
3807 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3808 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3809 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3810 }
3811 if (fIsNewTSS386)
3812 {
3813 /*
3814 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3815 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3816 */
3817 void *pvCurTSS32;
3818 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3819 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3820 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3821 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3822 if (rcStrict != VINF_SUCCESS)
3823 {
3824 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3825 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3826 return rcStrict;
3827 }
3828
3829        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3830 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3831 pCurTSS32->eip = uNextEip;
3832 pCurTSS32->eflags = u32EFlags;
3833 pCurTSS32->eax = pCtx->eax;
3834 pCurTSS32->ecx = pCtx->ecx;
3835 pCurTSS32->edx = pCtx->edx;
3836 pCurTSS32->ebx = pCtx->ebx;
3837 pCurTSS32->esp = pCtx->esp;
3838 pCurTSS32->ebp = pCtx->ebp;
3839 pCurTSS32->esi = pCtx->esi;
3840 pCurTSS32->edi = pCtx->edi;
3841 pCurTSS32->es = pCtx->es.Sel;
3842 pCurTSS32->cs = pCtx->cs.Sel;
3843 pCurTSS32->ss = pCtx->ss.Sel;
3844 pCurTSS32->ds = pCtx->ds.Sel;
3845 pCurTSS32->fs = pCtx->fs.Sel;
3846 pCurTSS32->gs = pCtx->gs.Sel;
3847
3848 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3849 if (rcStrict != VINF_SUCCESS)
3850 {
3851 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3852 VBOXSTRICTRC_VAL(rcStrict)));
3853 return rcStrict;
3854 }
3855 }
3856 else
3857 {
3858 /*
3859 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3860 */
3861 void *pvCurTSS16;
3862 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3863 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3864 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3865 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3866 if (rcStrict != VINF_SUCCESS)
3867 {
3868 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3869 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3870 return rcStrict;
3871 }
3872
3873        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3874 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3875 pCurTSS16->ip = uNextEip;
3876 pCurTSS16->flags = u32EFlags;
3877 pCurTSS16->ax = pCtx->ax;
3878 pCurTSS16->cx = pCtx->cx;
3879 pCurTSS16->dx = pCtx->dx;
3880 pCurTSS16->bx = pCtx->bx;
3881 pCurTSS16->sp = pCtx->sp;
3882 pCurTSS16->bp = pCtx->bp;
3883 pCurTSS16->si = pCtx->si;
3884 pCurTSS16->di = pCtx->di;
3885 pCurTSS16->es = pCtx->es.Sel;
3886 pCurTSS16->cs = pCtx->cs.Sel;
3887 pCurTSS16->ss = pCtx->ss.Sel;
3888 pCurTSS16->ds = pCtx->ds.Sel;
3889
3890 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3891 if (rcStrict != VINF_SUCCESS)
3892 {
3893 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3894 VBOXSTRICTRC_VAL(rcStrict)));
3895 return rcStrict;
3896 }
3897 }
3898
3899 /*
3900 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3901 */
3902 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3903 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3904 {
3905 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3906 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3907 pNewTSS->selPrev = pCtx->tr.Sel;
3908 }
3909
3910 /*
3911 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3912 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3913 */
3914 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3915 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3916 bool fNewDebugTrap;
3917 if (fIsNewTSS386)
3918 {
3919 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3920 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3921 uNewEip = pNewTSS32->eip;
3922 uNewEflags = pNewTSS32->eflags;
3923 uNewEax = pNewTSS32->eax;
3924 uNewEcx = pNewTSS32->ecx;
3925 uNewEdx = pNewTSS32->edx;
3926 uNewEbx = pNewTSS32->ebx;
3927 uNewEsp = pNewTSS32->esp;
3928 uNewEbp = pNewTSS32->ebp;
3929 uNewEsi = pNewTSS32->esi;
3930 uNewEdi = pNewTSS32->edi;
3931 uNewES = pNewTSS32->es;
3932 uNewCS = pNewTSS32->cs;
3933 uNewSS = pNewTSS32->ss;
3934 uNewDS = pNewTSS32->ds;
3935 uNewFS = pNewTSS32->fs;
3936 uNewGS = pNewTSS32->gs;
3937 uNewLdt = pNewTSS32->selLdt;
3938 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3939 }
3940 else
3941 {
3942 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3943 uNewCr3 = 0;
3944 uNewEip = pNewTSS16->ip;
3945 uNewEflags = pNewTSS16->flags;
3946 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3947 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3948 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3949 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3950 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3951 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3952 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3953 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3954 uNewES = pNewTSS16->es;
3955 uNewCS = pNewTSS16->cs;
3956 uNewSS = pNewTSS16->ss;
3957 uNewDS = pNewTSS16->ds;
3958 uNewFS = 0;
3959 uNewGS = 0;
3960 uNewLdt = pNewTSS16->selLdt;
3961 fNewDebugTrap = false;
3962 }
3963
3964 if (GCPtrNewTSS == GCPtrCurTSS)
3965 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3966 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3967
3968 /*
3969 * We're done accessing the new TSS.
3970 */
3971 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3972 if (rcStrict != VINF_SUCCESS)
3973 {
3974 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3975 return rcStrict;
3976 }
3977
3978 /*
3979 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3980 */
3981 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3982 {
3983 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3984 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3985 if (rcStrict != VINF_SUCCESS)
3986 {
3987 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3988 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3989 return rcStrict;
3990 }
3991
3992 /* Check that the descriptor indicates the new TSS is available (not busy). */
3993 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3994 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3995 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3996
3997 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3998 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3999 if (rcStrict != VINF_SUCCESS)
4000 {
4001 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4002 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4003 return rcStrict;
4004 }
4005 }
4006
4007 /*
4008     * From this point on, we're technically in the new task.  Any exceptions raised from here
4009     * on are deferred until the task switch completes and delivered before executing any instructions in the new task.
4010 */
4011 pCtx->tr.Sel = SelTSS;
4012 pCtx->tr.ValidSel = SelTSS;
4013 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4014 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4015 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4016 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4017 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4018
4019 /* Set the busy bit in TR. */
4020 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4021 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4022 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4023 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4024 {
4025 uNewEflags |= X86_EFL_NT;
4026 }
4027
4028 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4029 pCtx->cr0 |= X86_CR0_TS;
4030 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4031
4032 pCtx->eip = uNewEip;
4033 pCtx->eax = uNewEax;
4034 pCtx->ecx = uNewEcx;
4035 pCtx->edx = uNewEdx;
4036 pCtx->ebx = uNewEbx;
4037 pCtx->esp = uNewEsp;
4038 pCtx->ebp = uNewEbp;
4039 pCtx->esi = uNewEsi;
4040 pCtx->edi = uNewEdi;
4041
4042 uNewEflags &= X86_EFL_LIVE_MASK;
4043 uNewEflags |= X86_EFL_RA1_MASK;
4044 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4045
4046 /*
4047 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4048 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4049 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4050 */
4051 pCtx->es.Sel = uNewES;
4052 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4053
4054 pCtx->cs.Sel = uNewCS;
4055 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4056
4057 pCtx->ss.Sel = uNewSS;
4058 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4059
4060 pCtx->ds.Sel = uNewDS;
4061 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4062
4063 pCtx->fs.Sel = uNewFS;
4064 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4065
4066 pCtx->gs.Sel = uNewGS;
4067 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4068 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4069
4070 pCtx->ldtr.Sel = uNewLdt;
4071 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4072 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4073 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4074
4075 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4076 {
4077 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4078 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4079 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4080 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4081 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4082 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4083 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4084 }
4085
4086 /*
4087 * Switch CR3 for the new task.
4088 */
4089 if ( fIsNewTSS386
4090 && (pCtx->cr0 & X86_CR0_PG))
4091 {
4092 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4093 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4094 {
4095 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4096 AssertRCSuccessReturn(rc, rc);
4097 }
4098 else
4099 pCtx->cr3 = uNewCr3;
4100
4101 /* Inform PGM. */
4102 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4103 {
4104 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4105 AssertRCReturn(rc, rc);
4106 /* ignore informational status codes */
4107 }
4108 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4109 }
4110
4111 /*
4112 * Switch LDTR for the new task.
4113 */
4114 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4115 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4116 else
4117 {
4118 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4119
4120 IEMSELDESC DescNewLdt;
4121 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4122 if (rcStrict != VINF_SUCCESS)
4123 {
4124 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4125 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4126 return rcStrict;
4127 }
4128 if ( !DescNewLdt.Legacy.Gen.u1Present
4129 || DescNewLdt.Legacy.Gen.u1DescType
4130 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4131 {
4132 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4133 uNewLdt, DescNewLdt.Legacy.u));
4134 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4135 }
4136
4137 pCtx->ldtr.ValidSel = uNewLdt;
4138 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4139 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4140 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4141 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4142 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4143 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4145 }
4146
4147 IEMSELDESC DescSS;
4148 if (IEM_IS_V86_MODE(pVCpu))
4149 {
4150 pVCpu->iem.s.uCpl = 3;
4151 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4152 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4153 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4154 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4155 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4156 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4157
4158 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4159 DescSS.Legacy.u = 0;
4160 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4161 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4162 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4163 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4164 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4165 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4166 DescSS.Legacy.Gen.u2Dpl = 3;
4167 }
4168 else
4169 {
4170 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4171
4172 /*
4173 * Load the stack segment for the new task.
4174 */
4175 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4176 {
4177 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4178 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4179 }
4180
4181 /* Fetch the descriptor. */
4182 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4183 if (rcStrict != VINF_SUCCESS)
4184 {
4185 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4186 VBOXSTRICTRC_VAL(rcStrict)));
4187 return rcStrict;
4188 }
4189
4190 /* SS must be a data segment and writable. */
4191 if ( !DescSS.Legacy.Gen.u1DescType
4192 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4193 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4194 {
4195 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4196 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4198 }
4199
4200 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4201 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4202 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4203 {
4204 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4205 uNewCpl));
4206 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4207 }
4208
4209 /* Is it there? */
4210 if (!DescSS.Legacy.Gen.u1Present)
4211 {
4212 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4213 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4214 }
4215
4216 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4217 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4218
4219 /* Set the accessed bit before committing the result into SS. */
4220 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4221 {
4222 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4223 if (rcStrict != VINF_SUCCESS)
4224 return rcStrict;
4225 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4226 }
4227
4228 /* Commit SS. */
4229 pCtx->ss.Sel = uNewSS;
4230 pCtx->ss.ValidSel = uNewSS;
4231 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4232 pCtx->ss.u32Limit = cbLimit;
4233 pCtx->ss.u64Base = u64Base;
4234 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4236
4237 /* CPL has changed, update IEM before loading rest of segments. */
4238 pVCpu->iem.s.uCpl = uNewCpl;
4239
4240 /*
4241 * Load the data segments for the new task.
4242 */
4243 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4244 if (rcStrict != VINF_SUCCESS)
4245 return rcStrict;
4246 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4247 if (rcStrict != VINF_SUCCESS)
4248 return rcStrict;
4249 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4250 if (rcStrict != VINF_SUCCESS)
4251 return rcStrict;
4252 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4253 if (rcStrict != VINF_SUCCESS)
4254 return rcStrict;
4255
4256 /*
4257 * Load the code segment for the new task.
4258 */
4259 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4260 {
4261 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4262 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4263 }
4264
4265 /* Fetch the descriptor. */
4266 IEMSELDESC DescCS;
4267 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4268 if (rcStrict != VINF_SUCCESS)
4269 {
4270 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4271 return rcStrict;
4272 }
4273
4274 /* CS must be a code segment. */
4275 if ( !DescCS.Legacy.Gen.u1DescType
4276 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4277 {
4278 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4279 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4280 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4281 }
4282
4283 /* For conforming CS, DPL must be less than or equal to the RPL. */
4284 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4285 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4286 {
4287            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4288 DescCS.Legacy.Gen.u2Dpl));
4289 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4290 }
4291
4292 /* For non-conforming CS, DPL must match RPL. */
4293 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4294 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4295 {
4296            Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4297 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4298 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4299 }
4300
4301 /* Is it there? */
4302 if (!DescCS.Legacy.Gen.u1Present)
4303 {
4304 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4305 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4306 }
4307
4308 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4309 u64Base = X86DESC_BASE(&DescCS.Legacy);
4310
4311 /* Set the accessed bit before committing the result into CS. */
4312 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4313 {
4314 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4315 if (rcStrict != VINF_SUCCESS)
4316 return rcStrict;
4317 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4318 }
4319
4320 /* Commit CS. */
4321 pCtx->cs.Sel = uNewCS;
4322 pCtx->cs.ValidSel = uNewCS;
4323 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4324 pCtx->cs.u32Limit = cbLimit;
4325 pCtx->cs.u64Base = u64Base;
4326 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4328 }
4329
4330 /** @todo Debug trap. */
4331 if (fIsNewTSS386 && fNewDebugTrap)
4332 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4333
4334 /*
4335 * Construct the error code masks based on what caused this task switch.
4336 * See Intel Instruction reference for INT.
4337 */
4338 uint16_t uExt;
4339 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4340 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4341 {
4342 uExt = 1;
4343 }
4344 else
4345 uExt = 0;
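    /* Note (illustrative): uExt becomes the EXT bit (bit 0) of the error code for any
       fault raised below (#SS/#GP), marking the event as externally caused (hardware
       interrupt/exception) rather than a software INT. */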
4346
4347 /*
4348 * Push any error code on to the new stack.
4349 */
4350 if (fFlags & IEM_XCPT_FLAGS_ERR)
4351 {
4352 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4353 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4354 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4355
4356 /* Check that there is sufficient space on the stack. */
4357 /** @todo Factor out segment limit checking for normal/expand down segments
4358 * into a separate function. */
4359 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4360 {
4361 if ( pCtx->esp - 1 > cbLimitSS
4362 || pCtx->esp < cbStackFrame)
4363 {
4364 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4365 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4366 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4367 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4368 }
4369 }
4370 else
4371 {
4372 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4373 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4374 {
4375 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4376 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4377 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4378 }
4379 }
4380
4381
4382 if (fIsNewTSS386)
4383 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4384 else
4385 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4386 if (rcStrict != VINF_SUCCESS)
4387 {
4388 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4389 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4390 return rcStrict;
4391 }
4392 }
4393
4394 /* Check the new EIP against the new CS limit. */
4395 if (pCtx->eip > pCtx->cs.u32Limit)
4396 {
4397        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4398 pCtx->eip, pCtx->cs.u32Limit));
4399 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4400 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4401 }
4402
4403 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4404 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4405}
4406
4407
4408/**
4409 * Implements exceptions and interrupts for protected mode.
4410 *
4411 * @returns VBox strict status code.
4412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4413 * @param pCtx The CPU context.
4414 * @param cbInstr The number of bytes to offset rIP by in the return
4415 * address.
4416 * @param u8Vector The interrupt / exception vector number.
4417 * @param fFlags The flags.
4418 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4419 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4420 */
4421IEM_STATIC VBOXSTRICTRC
4422iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4423 PCPUMCTX pCtx,
4424 uint8_t cbInstr,
4425 uint8_t u8Vector,
4426 uint32_t fFlags,
4427 uint16_t uErr,
4428 uint64_t uCr2)
4429{
4430 /*
4431 * Read the IDT entry.
4432 */
4433 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4434 {
4435 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4436 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4437 }
4438 X86DESC Idte;
4439 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4440 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4441 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4442 return rcStrict;
4443 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4444 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4445 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4446
4447 /*
4448 * Check the descriptor type, DPL and such.
4449 * ASSUMES this is done in the same order as described for call-gate calls.
4450 */
4451 if (Idte.Gate.u1DescType)
4452 {
4453 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4454 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4455 }
4456 bool fTaskGate = false;
4457 uint8_t f32BitGate = true;
4458 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4459 switch (Idte.Gate.u4Type)
4460 {
4461 case X86_SEL_TYPE_SYS_UNDEFINED:
4462 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4463 case X86_SEL_TYPE_SYS_LDT:
4464 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4465 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4466 case X86_SEL_TYPE_SYS_UNDEFINED2:
4467 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4468 case X86_SEL_TYPE_SYS_UNDEFINED3:
4469 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4470 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4471 case X86_SEL_TYPE_SYS_UNDEFINED4:
4472 {
4473 /** @todo check what actually happens when the type is wrong...
4474 * esp. call gates. */
4475 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4476 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4477 }
4478
4479 case X86_SEL_TYPE_SYS_286_INT_GATE:
4480 f32BitGate = false;
4481 /* fall thru */
4482 case X86_SEL_TYPE_SYS_386_INT_GATE:
4483 fEflToClear |= X86_EFL_IF;
4484 break;
4485
4486 case X86_SEL_TYPE_SYS_TASK_GATE:
4487 fTaskGate = true;
4488#ifndef IEM_IMPLEMENTS_TASKSWITCH
4489 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4490#endif
4491 break;
4492
4493 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4494            f32BitGate = false; /* fall thru */
4495 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4496 break;
4497
4498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4499 }
4500
4501 /* Check DPL against CPL if applicable. */
4502 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4503 {
4504 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4505 {
4506 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4507 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4508 }
4509 }
4510
4511 /* Is it there? */
4512 if (!Idte.Gate.u1Present)
4513 {
4514 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4515 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4516 }
4517
4518 /* Is it a task-gate? */
4519 if (fTaskGate)
4520 {
4521 /*
4522 * Construct the error code masks based on what caused this task switch.
4523 * See Intel Instruction reference for INT.
4524 */
4525 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4526 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4527 RTSEL SelTSS = Idte.Gate.u16Sel;
4528
4529 /*
4530 * Fetch the TSS descriptor in the GDT.
4531 */
4532 IEMSELDESC DescTSS;
4533 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4534 if (rcStrict != VINF_SUCCESS)
4535 {
4536 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4537 VBOXSTRICTRC_VAL(rcStrict)));
4538 return rcStrict;
4539 }
4540
4541 /* The TSS descriptor must be a system segment and be available (not busy). */
4542 if ( DescTSS.Legacy.Gen.u1DescType
4543 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4544 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4545 {
4546 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4547 u8Vector, SelTSS, DescTSS.Legacy.au64));
4548 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4549 }
4550
4551 /* The TSS must be present. */
4552 if (!DescTSS.Legacy.Gen.u1Present)
4553 {
4554 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4555 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4556 }
4557
4558 /* Do the actual task switch. */
4559 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4560 }
4561
4562 /* A null CS is bad. */
4563 RTSEL NewCS = Idte.Gate.u16Sel;
4564 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4565 {
4566 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4567 return iemRaiseGeneralProtectionFault0(pVCpu);
4568 }
4569
4570 /* Fetch the descriptor for the new CS. */
4571 IEMSELDESC DescCS;
4572 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4573 if (rcStrict != VINF_SUCCESS)
4574 {
4575 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4576 return rcStrict;
4577 }
4578
4579 /* Must be a code segment. */
4580 if (!DescCS.Legacy.Gen.u1DescType)
4581 {
4582 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4583 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4584 }
4585 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4586 {
4587 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4588 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4589 }
4590
4591 /* Don't allow lowering the privilege level. */
4592 /** @todo Does the lowering of privileges apply to software interrupts
4593 * only? This has bearings on the more-privileged or
4594 * same-privilege stack behavior further down. A testcase would
4595 * be nice. */
4596 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4597 {
4598 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4599 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4600 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4601 }
4602
4603 /* Make sure the selector is present. */
4604 if (!DescCS.Legacy.Gen.u1Present)
4605 {
4606 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4607 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4608 }
4609
4610 /* Check the new EIP against the new CS limit. */
4611 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4612 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4613 ? Idte.Gate.u16OffsetLow
4614 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4615 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4616 if (uNewEip > cbLimitCS)
4617 {
4618 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4619 u8Vector, uNewEip, cbLimitCS, NewCS));
4620 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4621 }
4622 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4623
4624 /* Calc the flag image to push. */
4625 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4626 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4627 fEfl &= ~X86_EFL_RF;
4628 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4629 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4630
4631 /* From V8086 mode only go to CPL 0. */
4632 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4633 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4634 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4635 {
4636 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4637 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4638 }
4639
4640 /*
4641 * If the privilege level changes, we need to get a new stack from the TSS.
4642 * This in turns means validating the new SS and ESP...
4643 */
4644 if (uNewCpl != pVCpu->iem.s.uCpl)
4645 {
4646 RTSEL NewSS;
4647 uint32_t uNewEsp;
4648 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4649 if (rcStrict != VINF_SUCCESS)
4650 return rcStrict;
4651
4652 IEMSELDESC DescSS;
4653 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4654 if (rcStrict != VINF_SUCCESS)
4655 return rcStrict;
4656 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4657 if (!DescSS.Legacy.Gen.u1DefBig)
4658 {
4659 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4660 uNewEsp = (uint16_t)uNewEsp;
4661 }
4662
4663 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4664
4665 /* Check that there is sufficient space for the stack frame. */
4666 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4667 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4668 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4669 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
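         /* Note: these sizes simply count the words pushed below: a 16-bit gate
            pushes IP, CS, FLAGS, SP and SS (10 bytes, 12 with an error code),
            doubled by the f32BitGate shift for a 32-bit gate; when interrupting
            V8086 code, ES, DS, FS and GS are pushed as well (18/20 bytes, again
            doubled for a 32-bit gate). */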
4670
4671 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4672 {
4673 if ( uNewEsp - 1 > cbLimitSS
4674 || uNewEsp < cbStackFrame)
4675 {
4676 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4677 u8Vector, NewSS, uNewEsp, cbStackFrame));
4678 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4679 }
4680 }
4681 else
4682 {
4683 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4684 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4685 {
4686 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4687 u8Vector, NewSS, uNewEsp, cbStackFrame));
4688 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4689 }
4690 }
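         /* Note: for an expand-down SS the valid offsets lie above the limit, up
            to 0xffff or 0xffffffff depending on the D/B bit, which is why the
            checks above differ from the normal case. */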
4691
4692 /*
4693 * Start making changes.
4694 */
4695
4696 /* Set the new CPL so that stack accesses use it. */
4697 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4698 pVCpu->iem.s.uCpl = uNewCpl;
4699
4700 /* Create the stack frame. */
4701 RTPTRUNION uStackFrame;
4702 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4703 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4704 if (rcStrict != VINF_SUCCESS)
4705 return rcStrict;
4706 void * const pvStackFrame = uStackFrame.pv;
4707 if (f32BitGate)
4708 {
4709 if (fFlags & IEM_XCPT_FLAGS_ERR)
4710 *uStackFrame.pu32++ = uErr;
4711 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4712 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4713 uStackFrame.pu32[2] = fEfl;
4714 uStackFrame.pu32[3] = pCtx->esp;
4715 uStackFrame.pu32[4] = pCtx->ss.Sel;
4716 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4717 if (fEfl & X86_EFL_VM)
4718 {
4719 uStackFrame.pu32[1] = pCtx->cs.Sel;
4720 uStackFrame.pu32[5] = pCtx->es.Sel;
4721 uStackFrame.pu32[6] = pCtx->ds.Sel;
4722 uStackFrame.pu32[7] = pCtx->fs.Sel;
4723 uStackFrame.pu32[8] = pCtx->gs.Sel;
4724 }
4725 }
4726 else
4727 {
4728 if (fFlags & IEM_XCPT_FLAGS_ERR)
4729 *uStackFrame.pu16++ = uErr;
4730 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4731 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4732 uStackFrame.pu16[2] = fEfl;
4733 uStackFrame.pu16[3] = pCtx->sp;
4734 uStackFrame.pu16[4] = pCtx->ss.Sel;
4735 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4736 if (fEfl & X86_EFL_VM)
4737 {
4738 uStackFrame.pu16[1] = pCtx->cs.Sel;
4739 uStackFrame.pu16[5] = pCtx->es.Sel;
4740 uStackFrame.pu16[6] = pCtx->ds.Sel;
4741 uStackFrame.pu16[7] = pCtx->fs.Sel;
4742 uStackFrame.pu16[8] = pCtx->gs.Sel;
4743 }
4744 }
4745 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4746 if (rcStrict != VINF_SUCCESS)
4747 return rcStrict;
4748
4749 /* Mark the selectors 'accessed' (hope this is the correct time). */
4750 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4751 * after pushing the stack frame? (Write protect the gdt + stack to
4752 * find out.) */
4753 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4754 {
4755 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4756 if (rcStrict != VINF_SUCCESS)
4757 return rcStrict;
4758 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4759 }
4760
4761 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4762 {
4763 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4764 if (rcStrict != VINF_SUCCESS)
4765 return rcStrict;
4766 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4767 }
4768
4769 /*
4770 * Start committing the register changes (joins with the DPL=CPL branch).
4771 */
4772 pCtx->ss.Sel = NewSS;
4773 pCtx->ss.ValidSel = NewSS;
4774 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4775 pCtx->ss.u32Limit = cbLimitSS;
4776 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4777 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4778 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4779 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4780 * SP is loaded).
4781 * Need to check the other combinations too:
4782 * - 16-bit TSS, 32-bit handler
4783 * - 32-bit TSS, 16-bit handler */
4784 if (!pCtx->ss.Attr.n.u1DefBig)
4785 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4786 else
4787 pCtx->rsp = uNewEsp - cbStackFrame;
4788
4789 if (fEfl & X86_EFL_VM)
4790 {
4791 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4792 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4793 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4794 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4795 }
4796 }
4797 /*
4798 * Same privilege, no stack change and smaller stack frame.
4799 */
4800 else
4801 {
4802 uint64_t uNewRsp;
4803 RTPTRUNION uStackFrame;
4804 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4805 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4806 if (rcStrict != VINF_SUCCESS)
4807 return rcStrict;
4808 void * const pvStackFrame = uStackFrame.pv;
4809
4810 if (f32BitGate)
4811 {
4812 if (fFlags & IEM_XCPT_FLAGS_ERR)
4813 *uStackFrame.pu32++ = uErr;
4814 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4815 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4816 uStackFrame.pu32[2] = fEfl;
4817 }
4818 else
4819 {
4820 if (fFlags & IEM_XCPT_FLAGS_ERR)
4821 *uStackFrame.pu16++ = uErr;
4822 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4823 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4824 uStackFrame.pu16[2] = fEfl;
4825 }
4826 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4827 if (rcStrict != VINF_SUCCESS)
4828 return rcStrict;
4829
4830 /* Mark the CS selector as 'accessed'. */
4831 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4832 {
4833 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4834 if (rcStrict != VINF_SUCCESS)
4835 return rcStrict;
4836 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4837 }
4838
4839 /*
4840 * Start committing the register changes (joins with the other branch).
4841 */
4842 pCtx->rsp = uNewRsp;
4843 }
4844
4845 /* ... register committing continues. */
4846 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4847 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4848 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4849 pCtx->cs.u32Limit = cbLimitCS;
4850 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4851 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4852
4853 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4854 fEfl &= ~fEflToClear;
4855 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4856
4857 if (fFlags & IEM_XCPT_FLAGS_CR2)
4858 pCtx->cr2 = uCr2;
4859
4860 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4861 iemRaiseXcptAdjustState(pCtx, u8Vector);
4862
4863 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4864}
4865
4866
4867/**
4868 * Implements exceptions and interrupts for long mode.
4869 *
4870 * @returns VBox strict status code.
4871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4872 * @param pCtx The CPU context.
4873 * @param cbInstr The number of bytes to offset rIP by in the return
4874 * address.
4875 * @param u8Vector The interrupt / exception vector number.
4876 * @param fFlags The flags.
4877 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4878 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4879 */
4880IEM_STATIC VBOXSTRICTRC
4881iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4882 PCPUMCTX pCtx,
4883 uint8_t cbInstr,
4884 uint8_t u8Vector,
4885 uint32_t fFlags,
4886 uint16_t uErr,
4887 uint64_t uCr2)
4888{
4889 /*
4890 * Read the IDT entry.
4891 */
4892 uint16_t offIdt = (uint16_t)u8Vector << 4;
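     /* (Long mode IDT entries are 16 bytes each, hence the shift by 4.) */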
4893 if (pCtx->idtr.cbIdt < offIdt + 7)
4894 {
4895 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4896 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4897 }
4898 X86DESC64 Idte;
4899 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4900 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4901 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4902 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4903 return rcStrict;
4904 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4905 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4906 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4907
4908 /*
4909 * Check the descriptor type, DPL and such.
4910 * ASSUMES this is done in the same order as described for call-gate calls.
4911 */
4912 if (Idte.Gate.u1DescType)
4913 {
4914 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4915 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4916 }
4917 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4918 switch (Idte.Gate.u4Type)
4919 {
4920 case AMD64_SEL_TYPE_SYS_INT_GATE:
4921 fEflToClear |= X86_EFL_IF;
4922 break;
4923 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4924 break;
4925
4926 default:
4927 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4928 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4929 }
4930
4931 /* Check DPL against CPL if applicable. */
4932 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4933 {
4934 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4935 {
4936 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4937 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4938 }
4939 }
4940
4941 /* Is it there? */
4942 if (!Idte.Gate.u1Present)
4943 {
4944 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4945 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4946 }
4947
4948 /* A null CS is bad. */
4949 RTSEL NewCS = Idte.Gate.u16Sel;
4950 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4951 {
4952 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4953 return iemRaiseGeneralProtectionFault0(pVCpu);
4954 }
4955
4956 /* Fetch the descriptor for the new CS. */
4957 IEMSELDESC DescCS;
4958 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4959 if (rcStrict != VINF_SUCCESS)
4960 {
4961 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4962 return rcStrict;
4963 }
4964
4965 /* Must be a 64-bit code segment. */
4966 if (!DescCS.Long.Gen.u1DescType)
4967 {
4968 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4969 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4970 }
4971 if ( !DescCS.Long.Gen.u1Long
4972 || DescCS.Long.Gen.u1DefBig
4973 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4974 {
4975 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4976 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4977 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4978 }
4979
4980 /* Don't allow lowering the privilege level. For non-conforming CS
4981 selectors, the CS.DPL sets the privilege level the trap/interrupt
4982 handler runs at. For conforming CS selectors, the CPL remains
4983 unchanged, but the CS.DPL must be <= CPL. */
4984 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4985 * when CPU in Ring-0. Result \#GP? */
4986 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4987 {
4988 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4989 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4990 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4991 }
4992
4993
4994 /* Make sure the selector is present. */
4995 if (!DescCS.Legacy.Gen.u1Present)
4996 {
4997 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4998 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4999 }
5000
5001 /* Check that the new RIP is canonical. */
5002 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5003 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5004 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5005 if (!IEM_IS_CANONICAL(uNewRip))
5006 {
5007 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5008 return iemRaiseGeneralProtectionFault0(pVCpu);
5009 }
5010
5011 /*
5012 * If the privilege level changes or if the IST isn't zero, we need to get
5013 * a new stack from the TSS.
5014 */
5015 uint64_t uNewRsp;
5016 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5017 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5018 if ( uNewCpl != pVCpu->iem.s.uCpl
5019 || Idte.Gate.u3IST != 0)
5020 {
5021 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5022 if (rcStrict != VINF_SUCCESS)
5023 return rcStrict;
5024 }
5025 else
5026 uNewRsp = pCtx->rsp;
5027 uNewRsp &= ~(uint64_t)0xf;
5028
5029 /*
5030 * Calc the flag image to push.
5031 */
5032 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5033 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5034 fEfl &= ~X86_EFL_RF;
5035 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5036 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5037
5038 /*
5039 * Start making changes.
5040 */
5041 /* Set the new CPL so that stack accesses use it. */
5042 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5043 pVCpu->iem.s.uCpl = uNewCpl;
5044
5045 /* Create the stack frame. */
5046 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
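     /* Note: the 64-bit frame is always five qwords - SS, RSP, RFLAGS, CS and
        RIP - plus one more if an error code is pushed; it is written at the
        16-byte aligned RSP established above. */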
5047 RTPTRUNION uStackFrame;
5048 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5049 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5050 if (rcStrict != VINF_SUCCESS)
5051 return rcStrict;
5052 void * const pvStackFrame = uStackFrame.pv;
5053
5054 if (fFlags & IEM_XCPT_FLAGS_ERR)
5055 *uStackFrame.pu64++ = uErr;
5056 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5057 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5058 uStackFrame.pu64[2] = fEfl;
5059 uStackFrame.pu64[3] = pCtx->rsp;
5060 uStackFrame.pu64[4] = pCtx->ss.Sel;
5061 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5062 if (rcStrict != VINF_SUCCESS)
5063 return rcStrict;
5064
5065 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5066 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5067 * after pushing the stack frame? (Write protect the gdt + stack to
5068 * find out.) */
5069 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5070 {
5071 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5072 if (rcStrict != VINF_SUCCESS)
5073 return rcStrict;
5074 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5075 }
5076
5077 /*
5078 * Start committing the register changes.
5079 */
5080 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5081 * hidden registers when interrupting 32-bit or 16-bit code! */
5082 if (uNewCpl != uOldCpl)
5083 {
5084 pCtx->ss.Sel = 0 | uNewCpl;
5085 pCtx->ss.ValidSel = 0 | uNewCpl;
5086 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5087 pCtx->ss.u32Limit = UINT32_MAX;
5088 pCtx->ss.u64Base = 0;
5089 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5090 }
5091 pCtx->rsp = uNewRsp - cbStackFrame;
5092 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5093 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5094 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5095 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5096 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5097 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5098 pCtx->rip = uNewRip;
5099
5100 fEfl &= ~fEflToClear;
5101 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5102
5103 if (fFlags & IEM_XCPT_FLAGS_CR2)
5104 pCtx->cr2 = uCr2;
5105
5106 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5107 iemRaiseXcptAdjustState(pCtx, u8Vector);
5108
5109 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5110}
5111
5112
5113/**
5114 * Implements exceptions and interrupts.
5115 *
5116 * All exceptions and interrupts go through this function!
5117 *
5118 * @returns VBox strict status code.
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param cbInstr The number of bytes to offset rIP by in the return
5121 * address.
5122 * @param u8Vector The interrupt / exception vector number.
5123 * @param fFlags The flags.
5124 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5125 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5126 */
5127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5128iemRaiseXcptOrInt(PVMCPU pVCpu,
5129 uint8_t cbInstr,
5130 uint8_t u8Vector,
5131 uint32_t fFlags,
5132 uint16_t uErr,
5133 uint64_t uCr2)
5134{
5135 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5136#ifdef IN_RING0
5137 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5138 AssertRCReturn(rc, rc);
5139#endif
5140
5141#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5142 /*
5143 * Flush prefetch buffer
5144 */
5145 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5146#endif
5147
5148 /*
5149 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5150 */
5151 if ( pCtx->eflags.Bits.u1VM
5152 && pCtx->eflags.Bits.u2IOPL != 3
5153 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5154 && (pCtx->cr0 & X86_CR0_PE) )
5155 {
5156 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5157 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5158 u8Vector = X86_XCPT_GP;
5159 uErr = 0;
5160 }
5161#ifdef DBGFTRACE_ENABLED
5162 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5163 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5164 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5165#endif
5166
5167 /*
5168 * Do recursion accounting.
5169 */
5170 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5171 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5172 if (pVCpu->iem.s.cXcptRecursions == 0)
5173 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5174 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5175 else
5176 {
5177 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5178 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5179
5180 /** @todo double and triple faults. */
5181 if (pVCpu->iem.s.cXcptRecursions >= 3)
5182 {
5183#ifdef DEBUG_bird
5184 AssertFailed();
5185#endif
5186 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5187 }
5188
5189 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5190 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5191 {
5192 ....
5193 } */
5194 }
5195 pVCpu->iem.s.cXcptRecursions++;
5196 pVCpu->iem.s.uCurXcpt = u8Vector;
5197 pVCpu->iem.s.fCurXcpt = fFlags;
5198
5199 /*
5200 * Extensive logging.
5201 */
5202#if defined(LOG_ENABLED) && defined(IN_RING3)
5203 if (LogIs3Enabled())
5204 {
5205 PVM pVM = pVCpu->CTX_SUFF(pVM);
5206 char szRegs[4096];
5207 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5208 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5209 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5210 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5211 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5212 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5213 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5214 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5215 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5216 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5217 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5218 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5219 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5220 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5221 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5222 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5223 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5224 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5225 " efer=%016VR{efer}\n"
5226 " pat=%016VR{pat}\n"
5227 " sf_mask=%016VR{sf_mask}\n"
5228 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5229 " lstar=%016VR{lstar}\n"
5230 " star=%016VR{star} cstar=%016VR{cstar}\n"
5231 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5232 );
5233
5234 char szInstr[256];
5235 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5236 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5237 szInstr, sizeof(szInstr), NULL);
5238 Log3(("%s%s\n", szRegs, szInstr));
5239 }
5240#endif /* LOG_ENABLED */
5241
5242 /*
5243 * Call the mode specific worker function.
5244 */
5245 VBOXSTRICTRC rcStrict;
5246 if (!(pCtx->cr0 & X86_CR0_PE))
5247 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5248 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5249 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5250 else
5251 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5252
5253 /* Flush the prefetch buffer. */
5254#ifdef IEM_WITH_CODE_TLB
5255 pVCpu->iem.s.pbInstrBuf = NULL;
5256#else
5257 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5258#endif
5259
5260 /*
5261 * Unwind.
5262 */
5263 pVCpu->iem.s.cXcptRecursions--;
5264 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5265 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5266 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5267 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5268 return rcStrict;
5269}
5270
5271#ifdef IEM_WITH_SETJMP
5272/**
5273 * See iemRaiseXcptOrInt. Will not return.
5274 */
5275IEM_STATIC DECL_NO_RETURN(void)
5276iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5277 uint8_t cbInstr,
5278 uint8_t u8Vector,
5279 uint32_t fFlags,
5280 uint16_t uErr,
5281 uint64_t uCr2)
5282{
5283 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5284 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5285}
5286#endif
5287
5288
5289/** \#DE - 00. */
5290DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5291{
5292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5293}
5294
5295
5296/** \#DB - 01.
5297 * @note This automatically clears DR7.GD. */
5298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5299{
5300 /** @todo set/clear RF. */
5301 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5302 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5303}
5304
5305
5306/** \#BR - 05. */
5307DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5308{
5309 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5310}
5311
5312
5313/** \#UD - 06. */
5314DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5315{
5316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5317}
5318
5319
5320/** \#NM - 07. */
5321DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5322{
5323 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5324}
5325
5326
5327/** \#TS(err) - 0a. */
5328DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5329{
5330 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5331}
5332
5333
5334/** \#TS(tr) - 0a. */
5335DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5336{
5337 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5338 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5339}
5340
5341
5342/** \#TS(0) - 0a. */
5343DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5344{
5345 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5346 0, 0);
5347}
5348
5349
5350/** \#TS(sel) - 0a. */
5351DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5352{
5353 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5354 uSel & X86_SEL_MASK_OFF_RPL, 0);
5355}
5356
5357
5358/** \#NP(err) - 0b. */
5359DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5360{
5361 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5362}
5363
5364
5365/** \#NP(sel) - 0b. */
5366DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5367{
5368 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5369 uSel & ~X86_SEL_RPL, 0);
5370}
5371
5372
5373/** \#SS(seg) - 0c. */
5374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5375{
5376 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5377 uSel & ~X86_SEL_RPL, 0);
5378}
5379
5380
5381/** \#SS(err) - 0c. */
5382DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5383{
5384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5385}
5386
5387
5388/** \#GP(n) - 0d. */
5389DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5390{
5391 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5392}
5393
5394
5395/** \#GP(0) - 0d. */
5396DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5397{
5398 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5399}
5400
5401#ifdef IEM_WITH_SETJMP
5402/** \#GP(0) - 0d. */
5403DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5404{
5405 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5406}
5407#endif
5408
5409
5410/** \#GP(sel) - 0d. */
5411DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5412{
5413 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5414 Sel & ~X86_SEL_RPL, 0);
5415}
5416
5417
5418/** \#GP(0) - 0d. */
5419DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5420{
5421 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5422}
5423
5424
5425/** \#GP(sel) - 0d. */
5426DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5427{
5428 NOREF(iSegReg); NOREF(fAccess);
5429 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5430 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5431}
5432
5433#ifdef IEM_WITH_SETJMP
5434/** \#GP(sel) - 0d, longjmp. */
5435DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5436{
5437 NOREF(iSegReg); NOREF(fAccess);
5438 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5439 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5440}
5441#endif
5442
5443/** \#GP(sel) - 0d. */
5444DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5445{
5446 NOREF(Sel);
5447 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5448}
5449
5450#ifdef IEM_WITH_SETJMP
5451/** \#GP(sel) - 0d, longjmp. */
5452DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5453{
5454 NOREF(Sel);
5455 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5456}
5457#endif
5458
5459
5460/** \#GP(sel) - 0d. */
5461DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5462{
5463 NOREF(iSegReg); NOREF(fAccess);
5464 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5465}
5466
5467#ifdef IEM_WITH_SETJMP
5468/** \#GP(sel) - 0d, longjmp. */
5469DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5470 uint32_t fAccess)
5471{
5472 NOREF(iSegReg); NOREF(fAccess);
5473 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5474}
5475#endif
5476
5477
5478/** \#PF(n) - 0e. */
5479DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5480{
5481 uint16_t uErr;
5482 switch (rc)
5483 {
5484 case VERR_PAGE_NOT_PRESENT:
5485 case VERR_PAGE_TABLE_NOT_PRESENT:
5486 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5487 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5488 uErr = 0;
5489 break;
5490
5491 default:
5492 AssertMsgFailed(("%Rrc\n", rc));
5493 /* fall thru */
5494 case VERR_ACCESS_DENIED:
5495 uErr = X86_TRAP_PF_P;
5496 break;
5497
5498 /** @todo reserved */
5499 }
5500
5501 if (pVCpu->iem.s.uCpl == 3)
5502 uErr |= X86_TRAP_PF_US;
5503
5504 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5505 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5506 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5507 uErr |= X86_TRAP_PF_ID;
5508
5509#if 0 /* This is so much non-sense, really. Why was it done like that? */
5510 /* Note! RW access callers reporting a WRITE protection fault, will clear
5511 the READ flag before calling. So, read-modify-write accesses (RW)
5512 can safely be reported as READ faults. */
5513 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5514 uErr |= X86_TRAP_PF_RW;
5515#else
5516 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5517 {
5518 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5519 uErr |= X86_TRAP_PF_RW;
5520 }
5521#endif
5522
5523 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5524 uErr, GCPtrWhere);
5525}
5526
5527#ifdef IEM_WITH_SETJMP
5528/** \#PF(n) - 0e, longjmp. */
5529IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5530{
5531 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5532}
5533#endif
5534
5535
5536/** \#MF(0) - 10. */
5537DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5538{
5539 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5540}
5541
5542
5543/** \#AC(0) - 11. */
5544DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5545{
5546 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5547}
5548
5549
5550/**
5551 * Macro for calling iemCImplRaiseDivideError().
5552 *
5553 * This enables us to add/remove arguments and force different levels of
5554 * inlining as we wish.
5555 *
5556 * @return Strict VBox status code.
5557 */
5558#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5559IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5560{
5561 NOREF(cbInstr);
5562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5563}
5564
5565
5566/**
5567 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5568 *
5569 * This enables us to add/remove arguments and force different levels of
5570 * inlining as we wish.
5571 *
5572 * @return Strict VBox status code.
5573 */
5574#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5575IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5576{
5577 NOREF(cbInstr);
5578 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5579}
5580
5581
5582/**
5583 * Macro for calling iemCImplRaiseInvalidOpcode().
5584 *
5585 * This enables us to add/remove arguments and force different levels of
5586 * inlining as we wish.
5587 *
5588 * @return Strict VBox status code.
5589 */
5590#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5591IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5592{
5593 NOREF(cbInstr);
5594 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5595}
5596
5597
5598/** @} */
5599
5600
5601/*
5602 *
5603 * Helper routines.
5604 * Helper routines.
5605 * Helper routines.
5606 *
5607 */
5608
5609/**
5610 * Recalculates the effective operand size.
5611 *
5612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5613 */
5614IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5615{
5616 switch (pVCpu->iem.s.enmCpuMode)
5617 {
5618 case IEMMODE_16BIT:
5619 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5620 break;
5621 case IEMMODE_32BIT:
5622 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5623 break;
5624 case IEMMODE_64BIT:
5625 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5626 {
5627 case 0:
5628 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5629 break;
5630 case IEM_OP_PRF_SIZE_OP:
5631 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5632 break;
5633 case IEM_OP_PRF_SIZE_REX_W:
5634 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5635 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5636 break;
5637 }
5638 break;
5639 default:
5640 AssertFailed();
5641 }
5642}
5643
5644
5645/**
5646 * Sets the default operand size to 64-bit and recalculates the effective
5647 * operand size.
5648 *
5649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5650 */
5651IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5652{
5653 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5654 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
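     /* Note: for instructions defaulting to 64-bit operand size, only a lone
        0x66 operand-size prefix yields 16-bit; REX.W (alone or combined with
        0x66) keeps the operand size at 64-bit. */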
5655 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5656 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5657 else
5658 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5659}
5660
5661
5662/*
5663 *
5664 * Common opcode decoders.
5665 * Common opcode decoders.
5666 * Common opcode decoders.
5667 *
5668 */
5669//#include <iprt/mem.h>
5670
5671/**
5672 * Used to add extra details about a stub case.
5673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5674 */
5675IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5676{
5677#if defined(LOG_ENABLED) && defined(IN_RING3)
5678 PVM pVM = pVCpu->CTX_SUFF(pVM);
5679 char szRegs[4096];
5680 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5681 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5682 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5683 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5684 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5685 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5686 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5687 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5688 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5689 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5690 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5691 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5692 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5693 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5694 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5695 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5696 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5697 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5698 " efer=%016VR{efer}\n"
5699 " pat=%016VR{pat}\n"
5700 " sf_mask=%016VR{sf_mask}\n"
5701 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5702 " lstar=%016VR{lstar}\n"
5703 " star=%016VR{star} cstar=%016VR{cstar}\n"
5704 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5705 );
5706
5707 char szInstr[256];
5708 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5709 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5710 szInstr, sizeof(szInstr), NULL);
5711
5712 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5713#else
5714 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5715#endif
5716}
5717
5718/**
5719 * Complains about a stub.
5720 *
5721 * Providing two versions of this macro, one for daily use and one for use when
5722 * working on IEM.
5723 */
5724#if 0
5725# define IEMOP_BITCH_ABOUT_STUB() \
5726 do { \
5727 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5728 iemOpStubMsg2(pVCpu); \
5729 RTAssertPanic(); \
5730 } while (0)
5731#else
5732# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5733#endif
5734
5735/** Stubs an opcode. */
5736#define FNIEMOP_STUB(a_Name) \
5737 FNIEMOP_DEF(a_Name) \
5738 { \
5739 RT_NOREF_PV(pVCpu); \
5740 IEMOP_BITCH_ABOUT_STUB(); \
5741 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5742 } \
5743 typedef int ignore_semicolon
5744
5745/** Stubs an opcode. */
5746#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5747 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5748 { \
5749 RT_NOREF_PV(pVCpu); \
5750 RT_NOREF_PV(a_Name0); \
5751 IEMOP_BITCH_ABOUT_STUB(); \
5752 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5753 } \
5754 typedef int ignore_semicolon
5755
5756/** Stubs an opcode which currently should raise \#UD. */
5757#define FNIEMOP_UD_STUB(a_Name) \
5758 FNIEMOP_DEF(a_Name) \
5759 { \
5760 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5761 return IEMOP_RAISE_INVALID_OPCODE(); \
5762 } \
5763 typedef int ignore_semicolon
5764
5765/** Stubs an opcode which currently should raise \#UD. */
5766#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5767 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5768 { \
5769 RT_NOREF_PV(pVCpu); \
5770 RT_NOREF_PV(a_Name0); \
5771 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5772 return IEMOP_RAISE_INVALID_OPCODE(); \
5773 } \
5774 typedef int ignore_semicolon
5775
5776
5777
5778/** @name Register Access.
5779 * @{
5780 */
5781
5782/**
5783 * Gets a reference (pointer) to the specified hidden segment register.
5784 *
5785 * @returns Hidden register reference.
5786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5787 * @param iSegReg The segment register.
5788 */
5789IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5790{
5791 Assert(iSegReg < X86_SREG_COUNT);
5792 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5793 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5794
5795#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5796 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5797 { /* likely */ }
5798 else
5799 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5800#else
5801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5802#endif
5803 return pSReg;
5804}
5805
5806
5807/**
5808 * Ensures that the given hidden segment register is up to date.
5809 *
5810 * @returns Hidden register reference.
5811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5812 * @param pSReg The segment register.
5813 */
5814IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5815{
5816#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5817 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5818 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5819#else
5820 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5821 NOREF(pVCpu);
5822#endif
5823 return pSReg;
5824}
5825
5826
5827/**
5828 * Gets a reference (pointer) to the specified segment register (the selector
5829 * value).
5830 *
5831 * @returns Pointer to the selector variable.
5832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5833 * @param iSegReg The segment register.
5834 */
5835DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5836{
5837 Assert(iSegReg < X86_SREG_COUNT);
5838 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5839 return &pCtx->aSRegs[iSegReg].Sel;
5840}
5841
5842
5843/**
5844 * Fetches the selector value of a segment register.
5845 *
5846 * @returns The selector value.
5847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5848 * @param iSegReg The segment register.
5849 */
5850DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5851{
5852 Assert(iSegReg < X86_SREG_COUNT);
5853 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5854}
5855
5856
5857/**
5858 * Gets a reference (pointer) to the specified general purpose register.
5859 *
5860 * @returns Register reference.
5861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5862 * @param iReg The general purpose register.
5863 */
5864DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5865{
5866 Assert(iReg < 16);
5867 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5868 return &pCtx->aGRegs[iReg];
5869}
5870
5871
5872/**
5873 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5874 *
5875 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5876 *
5877 * @returns Register reference.
5878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5879 * @param iReg The register.
5880 */
5881DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5882{
5883 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5884 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5885 {
5886 Assert(iReg < 16);
5887 return &pCtx->aGRegs[iReg].u8;
5888 }
5889 /* high 8-bit register. */
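     /* (Without a REX prefix, encodings 4 thru 7 select AH, CH, DH and BH, i.e.
        the high bytes of registers 0 thru 3 - hence the 'iReg & 3' below.) */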
5890 Assert(iReg < 8);
5891 return &pCtx->aGRegs[iReg & 3].bHi;
5892}
5893
5894
5895/**
5896 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5897 *
5898 * @returns Register reference.
5899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5900 * @param iReg The register.
5901 */
5902DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5903{
5904 Assert(iReg < 16);
5905 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5906 return &pCtx->aGRegs[iReg].u16;
5907}
5908
5909
5910/**
5911 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5912 *
5913 * @returns Register reference.
5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5915 * @param iReg The register.
5916 */
5917DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5918{
5919 Assert(iReg < 16);
5920 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5921 return &pCtx->aGRegs[iReg].u32;
5922}
5923
5924
5925/**
5926 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5927 *
5928 * @returns Register reference.
5929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5930 * @param iReg The register.
5931 */
5932DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5933{
5934 Assert(iReg < 16);
5935 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5936 return &pCtx->aGRegs[iReg].u64;
5937}
5938
5939
5940/**
5941 * Fetches the value of an 8-bit general purpose register.
5942 *
5943 * @returns The register value.
5944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5945 * @param iReg The register.
5946 */
5947DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5948{
5949 return *iemGRegRefU8(pVCpu, iReg);
5950}
5951
5952
5953/**
5954 * Fetches the value of a 16-bit general purpose register.
5955 *
5956 * @returns The register value.
5957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5958 * @param iReg The register.
5959 */
5960DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5961{
5962 Assert(iReg < 16);
5963 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5964}
5965
5966
5967/**
5968 * Fetches the value of a 32-bit general purpose register.
5969 *
5970 * @returns The register value.
5971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5972 * @param iReg The register.
5973 */
5974DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5975{
5976 Assert(iReg < 16);
5977 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5978}
5979
5980
5981/**
5982 * Fetches the value of a 64-bit general purpose register.
5983 *
5984 * @returns The register value.
5985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5986 * @param iReg The register.
5987 */
5988DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5989{
5990 Assert(iReg < 16);
5991 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5992}
5993
5994
5995/**
5996 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5997 *
5998 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5999 * segment limit.
6000 *
6001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6002 * @param offNextInstr The offset of the next instruction.
6003 */
6004IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6005{
6006 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6007 switch (pVCpu->iem.s.enmEffOpSize)
6008 {
6009 case IEMMODE_16BIT:
6010 {
6011 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6012 if ( uNewIp > pCtx->cs.u32Limit
6013 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6014 return iemRaiseGeneralProtectionFault0(pVCpu);
6015 pCtx->rip = uNewIp;
6016 break;
6017 }
6018
6019 case IEMMODE_32BIT:
6020 {
6021 Assert(pCtx->rip <= UINT32_MAX);
6022 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6023
6024 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6025 if (uNewEip > pCtx->cs.u32Limit)
6026 return iemRaiseGeneralProtectionFault0(pVCpu);
6027 pCtx->rip = uNewEip;
6028 break;
6029 }
6030
6031 case IEMMODE_64BIT:
6032 {
6033 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6034
6035 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6036 if (!IEM_IS_CANONICAL(uNewRip))
6037 return iemRaiseGeneralProtectionFault0(pVCpu);
6038 pCtx->rip = uNewRip;
6039 break;
6040 }
6041
6042 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6043 }
6044
6045 pCtx->eflags.Bits.u1RF = 0;
6046
6047#ifndef IEM_WITH_CODE_TLB
6048 /* Flush the prefetch buffer. */
6049 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6050#endif
6051
6052 return VINF_SUCCESS;
6053}
6054
6055
6056/**
6057 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6058 *
6059 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6060 * segment limit.
6061 *
6062 * @returns Strict VBox status code.
6063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6064 * @param offNextInstr The offset of the next instruction.
6065 */
6066IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6067{
6068 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6069 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6070
6071 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6072 if ( uNewIp > pCtx->cs.u32Limit
6073 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6074 return iemRaiseGeneralProtectionFault0(pVCpu);
6075 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6076 pCtx->rip = uNewIp;
6077 pCtx->eflags.Bits.u1RF = 0;
6078
6079#ifndef IEM_WITH_CODE_TLB
6080 /* Flush the prefetch buffer. */
6081 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6082#endif
6083
6084 return VINF_SUCCESS;
6085}
6086
6087
6088/**
6089 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6090 *
6091 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6092 * segment limit.
6093 *
6094 * @returns Strict VBox status code.
6095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6096 * @param offNextInstr The offset of the next instruction.
6097 */
6098IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6099{
6100 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6101 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6102
6103 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6104 {
6105 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6106
6107 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6108 if (uNewEip > pCtx->cs.u32Limit)
6109 return iemRaiseGeneralProtectionFault0(pVCpu);
6110 pCtx->rip = uNewEip;
6111 }
6112 else
6113 {
6114 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6115
6116 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6117 if (!IEM_IS_CANONICAL(uNewRip))
6118 return iemRaiseGeneralProtectionFault0(pVCpu);
6119 pCtx->rip = uNewRip;
6120 }
6121 pCtx->eflags.Bits.u1RF = 0;
6122
6123#ifndef IEM_WITH_CODE_TLB
6124 /* Flush the prefetch buffer. */
6125 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6126#endif
6127
6128 return VINF_SUCCESS;
6129}
6130
6131
6132/**
6133 * Performs a near jump to the specified address.
6134 *
6135 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6136 * segment limit.
6137 *
6138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6139 * @param uNewRip The new RIP value.
6140 */
6141IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6142{
6143 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6144 switch (pVCpu->iem.s.enmEffOpSize)
6145 {
6146 case IEMMODE_16BIT:
6147 {
6148 Assert(uNewRip <= UINT16_MAX);
6149 if ( uNewRip > pCtx->cs.u32Limit
6150 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6151 return iemRaiseGeneralProtectionFault0(pVCpu);
6152 /** @todo Test 16-bit jump in 64-bit mode. */
6153 pCtx->rip = uNewRip;
6154 break;
6155 }
6156
6157 case IEMMODE_32BIT:
6158 {
6159 Assert(uNewRip <= UINT32_MAX);
6160 Assert(pCtx->rip <= UINT32_MAX);
6161 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6162
6163 if (uNewRip > pCtx->cs.u32Limit)
6164 return iemRaiseGeneralProtectionFault0(pVCpu);
6165 pCtx->rip = uNewRip;
6166 break;
6167 }
6168
6169 case IEMMODE_64BIT:
6170 {
6171 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6172
6173 if (!IEM_IS_CANONICAL(uNewRip))
6174 return iemRaiseGeneralProtectionFault0(pVCpu);
6175 pCtx->rip = uNewRip;
6176 break;
6177 }
6178
6179 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6180 }
6181
6182 pCtx->eflags.Bits.u1RF = 0;
6183
6184#ifndef IEM_WITH_CODE_TLB
6185 /* Flush the prefetch buffer. */
6186 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6187#endif
6188
6189 return VINF_SUCCESS;
6190}
6191
6192
6193/**
6194 * Get the address of the top of the stack.
6195 *
6196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6197 * @param pCtx The CPU context which SP/ESP/RSP should be
6198 * read.
6199 */
6200DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6201{
6202 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6203 return pCtx->rsp;
6204 if (pCtx->ss.Attr.n.u1DefBig)
6205 return pCtx->esp;
6206 return pCtx->sp;
6207}
6208
6209
6210/**
6211 * Updates the RIP/EIP/IP to point to the next instruction.
6212 *
6213 * This function leaves the EFLAGS.RF flag alone.
6214 *
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param cbInstr The number of bytes to add.
6217 */
6218IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6219{
6220 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6221 switch (pVCpu->iem.s.enmCpuMode)
6222 {
6223 case IEMMODE_16BIT:
6224 Assert(pCtx->rip <= UINT16_MAX);
6225 pCtx->eip += cbInstr;
6226 pCtx->eip &= UINT32_C(0xffff);
6227 break;
6228
6229 case IEMMODE_32BIT:
6230 pCtx->eip += cbInstr;
6231 Assert(pCtx->rip <= UINT32_MAX);
6232 break;
6233
6234 case IEMMODE_64BIT:
6235 pCtx->rip += cbInstr;
6236 break;
6237 default: AssertFailed();
6238 }
6239}
6240
6241
6242#if 0
6243/**
6244 * Updates the RIP/EIP/IP to point to the next instruction.
6245 *
6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6247 */
6248IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6249{
6250 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6251}
6252#endif
6253
6254
6255
6256/**
6257 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6258 *
6259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6260 * @param cbInstr The number of bytes to add.
6261 */
6262IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6263{
6264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6265
6266 pCtx->eflags.Bits.u1RF = 0;
6267
6268 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6269#if ARCH_BITS >= 64
6270 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6271 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6272 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
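     /* (The mask table wraps the result to 16 or 32 bits according to the CPU
        mode; in 64-bit mode no wrapping is applied.) */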
6273#else
6274 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6275 pCtx->rip += cbInstr;
6276 else
6277 {
6278 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6279 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6280 }
6281#endif
6282}
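
/*
 * A minimal standalone sketch of the mode-indexed masking used above, assuming
 * only that the mode index is 0 for 16-bit, 1 for 32-bit and 2 for 64-bit code
 * (as the AssertCompile above verifies).  Illustration only, not part of IEM;
 * the function name is hypothetical.
 */
#if 0
static uint64_t iemSketchAdvanceIp(uint64_t uRip, uint8_t cbInstr, unsigned iMode)
{
    /* One mask per mode: IP wraps at 16 bits, EIP at 32 bits, RIP not at all. */
    static uint64_t const s_auMasks[3] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_auMasks[iMode];
}
#endif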
6283
6284
6285/**
6286 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6287 *
6288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6289 */
6290IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6291{
6292 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6293}
6294
6295
6296/**
6297 * Adds to the stack pointer.
6298 *
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 * @param pCtx The CPU context whose SP/ESP/RSP should be
6301 * updated.
6302 * @param cbToAdd The number of bytes to add (8-bit!).
6303 */
6304DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6305{
6306 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6307 pCtx->rsp += cbToAdd;
6308 else if (pCtx->ss.Attr.n.u1DefBig)
6309 pCtx->esp += cbToAdd;
6310 else
6311 pCtx->sp += cbToAdd;
6312}
6313
6314
6315/**
6316 * Subtracts from the stack pointer.
6317 *
6318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6319 * @param pCtx The CPU context whose SP/ESP/RSP should be
6320 * updated.
6321 * @param cbToSub The number of bytes to subtract (8-bit!).
6322 */
6323DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6324{
6325 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6326 pCtx->rsp -= cbToSub;
6327 else if (pCtx->ss.Attr.n.u1DefBig)
6328 pCtx->esp -= cbToSub;
6329 else
6330 pCtx->sp -= cbToSub;
6331}
6332
6333
6334/**
6335 * Adds to the temporary stack pointer.
6336 *
6337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6338 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6339 * @param cbToAdd The number of bytes to add (16-bit).
6340 * @param pCtx Where to get the current stack mode.
6341 */
6342DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6343{
6344 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6345 pTmpRsp->u += cbToAdd;
6346 else if (pCtx->ss.Attr.n.u1DefBig)
6347 pTmpRsp->DWords.dw0 += cbToAdd;
6348 else
6349 pTmpRsp->Words.w0 += cbToAdd;
6350}
6351
6352
6353/**
6354 * Subtracts from the temporary stack pointer.
6355 *
6356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6357 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6358 * @param cbToSub The number of bytes to subtract.
6359 * @param pCtx Where to get the current stack mode.
6360 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6361 * expecting that.
6362 */
6363DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6364{
6365 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6366 pTmpRsp->u -= cbToSub;
6367 else if (pCtx->ss.Attr.n.u1DefBig)
6368 pTmpRsp->DWords.dw0 -= cbToSub;
6369 else
6370 pTmpRsp->Words.w0 -= cbToSub;
6371}
6372
6373
6374/**
6375 * Calculates the effective stack address for a push of the specified size as
6376 * well as the new RSP value (upper bits may be masked).
6377 *
6378 * @returns Effective stack address for the push.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param pCtx Where to get the current stack mode.
6381 * @param cbItem The size of the stack item to push.
6382 * @param puNewRsp Where to return the new RSP value.
6383 */
6384DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6385{
6386 RTUINT64U uTmpRsp;
6387 RTGCPTR GCPtrTop;
6388 uTmpRsp.u = pCtx->rsp;
6389
6390 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6391 GCPtrTop = uTmpRsp.u -= cbItem;
6392 else if (pCtx->ss.Attr.n.u1DefBig)
6393 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6394 else
6395 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6396 *puNewRsp = uTmpRsp.u;
6397 return GCPtrTop;
6398}
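
/*
 * Worked example for the 16-bit stack case above (illustration only): with
 * RSP=0x0000123400020004 and a 4 byte push, only SP is decremented, so the
 * effective address is SS.base + 0x0000 and the new RSP keeps its upper bits:
 * 0x0000123400020000.  A hypothetical standalone sketch of that arithmetic:
 */
#if 0
static uint64_t iemSketchPush16(uint64_t uRsp, uint8_t cbItem, uint16_t *puEffOff)
{
    RTUINT64U uTmp;                 /* the same union type iemRegGetRspForPush uses */
    uTmp.u = uRsp;
    uTmp.Words.w0 -= cbItem;        /* only SP changes, the upper 48 bits are preserved */
    *puEffOff = uTmp.Words.w0;      /* offset into the SS segment for the write */
    return uTmp.u;                  /* the value the caller would commit to RSP */
}
#endif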
6399
6400
6401/**
6402 * Gets the current stack pointer and calculates the value after a pop of the
6403 * specified size.
6404 *
6405 * @returns Current stack pointer.
6406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6407 * @param pCtx Where to get the current stack mode.
6408 * @param cbItem The size of the stack item to pop.
6409 * @param puNewRsp Where to return the new RSP value.
6410 */
6411DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6412{
6413 RTUINT64U uTmpRsp;
6414 RTGCPTR GCPtrTop;
6415 uTmpRsp.u = pCtx->rsp;
6416
6417 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6418 {
6419 GCPtrTop = uTmpRsp.u;
6420 uTmpRsp.u += cbItem;
6421 }
6422 else if (pCtx->ss.Attr.n.u1DefBig)
6423 {
6424 GCPtrTop = uTmpRsp.DWords.dw0;
6425 uTmpRsp.DWords.dw0 += cbItem;
6426 }
6427 else
6428 {
6429 GCPtrTop = uTmpRsp.Words.w0;
6430 uTmpRsp.Words.w0 += cbItem;
6431 }
6432 *puNewRsp = uTmpRsp.u;
6433 return GCPtrTop;
6434}
6435
6436
6437/**
6438 * Calculates the effective stack address for a push of the specified size as
6439 * well as the new temporary RSP value (upper bits may be masked).
6440 *
6441 * @returns Effective stack address for the push.
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 * @param pCtx Where to get the current stack mode.
6444 * @param pTmpRsp The temporary stack pointer. This is updated.
6445 * @param cbItem The size of the stack item to push.
6446 */
6447DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6448{
6449 RTGCPTR GCPtrTop;
6450
6451 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6452 GCPtrTop = pTmpRsp->u -= cbItem;
6453 else if (pCtx->ss.Attr.n.u1DefBig)
6454 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6455 else
6456 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6457 return GCPtrTop;
6458}
6459
6460
6461/**
6462 * Gets the effective stack address for a pop of the specified size and
6463 * calculates and updates the temporary RSP.
6464 *
6465 * @returns Current stack pointer.
6466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6467 * @param pCtx Where to get the current stack mode.
6468 * @param pTmpRsp The temporary stack pointer. This is updated.
6469 * @param cbItem The size of the stack item to pop.
6470 */
6471DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6472{
6473 RTGCPTR GCPtrTop;
6474 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6475 {
6476 GCPtrTop = pTmpRsp->u;
6477 pTmpRsp->u += cbItem;
6478 }
6479 else if (pCtx->ss.Attr.n.u1DefBig)
6480 {
6481 GCPtrTop = pTmpRsp->DWords.dw0;
6482 pTmpRsp->DWords.dw0 += cbItem;
6483 }
6484 else
6485 {
6486 GCPtrTop = pTmpRsp->Words.w0;
6487 pTmpRsp->Words.w0 += cbItem;
6488 }
6489 return GCPtrTop;
6490}
6491
6492/** @} */
6493
6494
6495/** @name FPU access and helpers.
6496 *
6497 * @{
6498 */
6499
6500
6501/**
6502 * Hook for preparing to use the host FPU.
6503 *
6504 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6505 *
6506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6507 */
6508DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6509{
6510#ifdef IN_RING3
6511 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6512#else
6513 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6514#endif
6515}
6516
6517
6518/**
6519 * Hook for preparing to use the host FPU for SSE.
6520 *
6521 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6522 *
6523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6524 */
6525DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6526{
6527 iemFpuPrepareUsage(pVCpu);
6528}
6529
6530
6531/**
6532 * Hook for actualizing the guest FPU state before the interpreter reads it.
6533 *
6534 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6535 *
6536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6537 */
6538DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6539{
6540#ifdef IN_RING3
6541 NOREF(pVCpu);
6542#else
6543 CPUMRZFpuStateActualizeForRead(pVCpu);
6544#endif
6545}
6546
6547
6548/**
6549 * Hook for actualizing the guest FPU state before the interpreter changes it.
6550 *
6551 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6552 *
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 */
6555DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6556{
6557#ifdef IN_RING3
6558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6559#else
6560 CPUMRZFpuStateActualizeForChange(pVCpu);
6561#endif
6562}
6563
6564
6565/**
6566 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6567 * only.
6568 *
6569 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6570 *
6571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6572 */
6573DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6574{
6575#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6576 NOREF(pVCpu);
6577#else
6578 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6579#endif
6580}
6581
6582
6583/**
6584 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6585 * read+write.
6586 *
6587 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6588 *
6589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6590 */
6591DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6592{
6593#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6594 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6595#else
6596 CPUMRZFpuStateActualizeForChange(pVCpu);
6597#endif
6598}
6599
6600
6601/**
6602 * Stores a QNaN value into a FPU register.
6603 *
6604 * @param pReg Pointer to the register.
6605 */
6606DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6607{
6608 pReg->au32[0] = UINT32_C(0x00000000);
6609 pReg->au32[1] = UINT32_C(0xc0000000);
6610 pReg->au16[4] = UINT16_C(0xffff);
6611}
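
/*
 * Note (descriptive only): the three stores above assemble the 80-bit pattern
 * 0xFFFF C0000000 00000000, i.e. sign=1, exponent=0x7fff (all ones), integer
 * bit and top fraction bit set, remaining fraction bits zero - the x87 "QNaN
 * floating-point indefinite" that masked exception responses produce.
 */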
6612
6613
6614/**
6615 * Updates the FOP, FPU.CS and FPUIP registers.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param pCtx The CPU context.
6619 * @param pFpuCtx The FPU context.
6620 */
6621DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6622{
6623 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6624 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6625 /** @todo x87.CS and FPUIP need to be kept separately. */
6626 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6627 {
6628 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
6629 * are handled in real mode, based on the fnsave and fnstenv images. */
6630 pFpuCtx->CS = 0;
6631 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6632 }
6633 else
6634 {
6635 pFpuCtx->CS = pCtx->cs.Sel;
6636 pFpuCtx->FPUIP = pCtx->rip;
6637 }
6638}
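
/*
 * Worked example for the real/V86 mode branch above (illustration only): with
 * CS=0x1234 and IP=0x0010, FPUIP is stored as 0x12340 | 0x0010 = 0x00012350,
 * which coincides with the classic real mode linear address (CS << 4) + IP
 * whenever the two terms have no overlapping set bits; the @todo above notes
 * that the exact hardware behaviour still wants a testcase.
 */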
6639
6640
6641/**
6642 * Updates the x87.DS and FPUDP registers.
6643 *
6644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6645 * @param pCtx The CPU context.
6646 * @param pFpuCtx The FPU context.
6647 * @param iEffSeg The effective segment register.
6648 * @param GCPtrEff The effective address relative to @a iEffSeg.
6649 */
6650DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6651{
6652 RTSEL sel;
6653 switch (iEffSeg)
6654 {
6655 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6656 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6657 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6658 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6659 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6660 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6661 default:
6662 AssertMsgFailed(("%d\n", iEffSeg));
6663 sel = pCtx->ds.Sel;
6664 }
6665 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6666 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6667 {
6668 pFpuCtx->DS = 0;
6669 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6670 }
6671 else
6672 {
6673 pFpuCtx->DS = sel;
6674 pFpuCtx->FPUDP = GCPtrEff;
6675 }
6676}
6677
6678
6679/**
6680 * Rotates the stack registers in the push direction.
6681 *
6682 * @param pFpuCtx The FPU context.
6683 * @remarks This is a complete waste of time, but fxsave stores the registers in
6684 * stack order.
6685 */
6686DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6687{
6688 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6689 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6690 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6691 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6692 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6693 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6694 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6695 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6696 pFpuCtx->aRegs[0].r80 = r80Tmp;
6697}
6698
6699
6700/**
6701 * Rotates the stack registers in the pop direction.
6702 *
6703 * @param pFpuCtx The FPU context.
6704 * @remarks This is a complete waste of time, but fxsave stores the registers in
6705 * stack order.
6706 */
6707DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6708{
6709 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6710 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6711 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6712 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6713 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6714 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6715 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6716 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6717 pFpuCtx->aRegs[7].r80 = r80Tmp;
6718}
6719
6720
6721/**
6722 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6723 * exception prevents it.
6724 *
6725 * @param pResult The FPU operation result to push.
6726 * @param pFpuCtx The FPU context.
6727 */
6728IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6729{
6730 /* Update FSW and bail if there are pending exceptions afterwards. */
6731 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6732 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6733 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6734 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6735 {
6736 pFpuCtx->FSW = fFsw;
6737 return;
6738 }
6739
6740 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6741 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6742 {
6743 /* All is fine, push the actual value. */
6744 pFpuCtx->FTW |= RT_BIT(iNewTop);
6745 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6746 }
6747 else if (pFpuCtx->FCW & X86_FCW_IM)
6748 {
6749 /* Masked stack overflow, push QNaN. */
6750 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6751 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6752 }
6753 else
6754 {
6755 /* Raise stack overflow, don't push anything. */
6756 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6757 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6758 return;
6759 }
6760
6761 fFsw &= ~X86_FSW_TOP_MASK;
6762 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6763 pFpuCtx->FSW = fFsw;
6764
6765 iemFpuRotateStackPush(pFpuCtx);
6766}
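
/*
 * A minimal sketch of the modulo-8 TOP arithmetic used above, assuming only
 * that X86_FSW_TOP_SMASK is 7 (TOP is a three bit field): adding 7 and masking
 * decrements TOP, while adding 9 (as iemFpuMaybePopOne below does inside the
 * FSW bit-field) increments it.  Illustration only; the names are hypothetical.
 */
#if 0
static uint16_t iemSketchTopDec(uint16_t uTop) { return (uTop + 7) & 7; } /* 0 -> 7, 3 -> 2 */
static uint16_t iemSketchTopInc(uint16_t uTop) { return (uTop + 9) & 7; } /* 7 -> 0, 3 -> 4 */
#endif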
6767
6768
6769/**
6770 * Stores a result in a FPU register and updates the FSW and FTW.
6771 *
6772 * @param pFpuCtx The FPU context.
6773 * @param pResult The result to store.
6774 * @param iStReg Which FPU register to store it in.
6775 */
6776IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6777{
6778 Assert(iStReg < 8);
6779 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6780 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6781 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6782 pFpuCtx->FTW |= RT_BIT(iReg);
6783 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6784}
6785
6786
6787/**
6788 * Only updates the FPU status word (FSW) with the result of the current
6789 * instruction.
6790 *
6791 * @param pFpuCtx The FPU context.
6792 * @param u16FSW The FSW output of the current instruction.
6793 */
6794IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6795{
6796 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6797 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6798}
6799
6800
6801/**
6802 * Pops one item off the FPU stack if no pending exception prevents it.
6803 *
6804 * @param pFpuCtx The FPU context.
6805 */
6806IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6807{
6808 /* Check pending exceptions. */
6809 uint16_t uFSW = pFpuCtx->FSW;
6810 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6811 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6812 return;
6813
6814 /* TOP++ (one less item on the stack). */
6815 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6816 uFSW &= ~X86_FSW_TOP_MASK;
6817 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6818 pFpuCtx->FSW = uFSW;
6819
6820 /* Mark the previous ST0 as empty. */
6821 iOldTop >>= X86_FSW_TOP_SHIFT;
6822 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6823
6824 /* Rotate the registers. */
6825 iemFpuRotateStackPop(pFpuCtx);
6826}
6827
6828
6829/**
6830 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6831 *
6832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6833 * @param pResult The FPU operation result to push.
6834 */
6835IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6836{
6837 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6838 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6839 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6840 iemFpuMaybePushResult(pResult, pFpuCtx);
6841}
6842
6843
6844/**
6845 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6846 * and sets FPUDP and FPUDS.
6847 *
6848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6849 * @param pResult The FPU operation result to push.
6850 * @param iEffSeg The effective segment register.
6851 * @param GCPtrEff The effective address relative to @a iEffSeg.
6852 */
6853IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6854{
6855 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6856 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6857 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6858 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6859 iemFpuMaybePushResult(pResult, pFpuCtx);
6860}
6861
6862
6863/**
6864 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6865 * unless a pending exception prevents it.
6866 *
6867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6868 * @param pResult The FPU operation result to store and push.
6869 */
6870IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6871{
6872 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6873 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6874 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6875
6876 /* Update FSW and bail if there are pending exceptions afterwards. */
6877 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6878 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6879 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6880 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6881 {
6882 pFpuCtx->FSW = fFsw;
6883 return;
6884 }
6885
6886 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6887 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6888 {
6889 /* All is fine, push the actual value. */
6890 pFpuCtx->FTW |= RT_BIT(iNewTop);
6891 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6892 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6893 }
6894 else if (pFpuCtx->FCW & X86_FCW_IM)
6895 {
6896 /* Masked stack overflow, push QNaN. */
6897 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6898 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6899 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6900 }
6901 else
6902 {
6903 /* Raise stack overflow, don't push anything. */
6904 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6905 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6906 return;
6907 }
6908
6909 fFsw &= ~X86_FSW_TOP_MASK;
6910 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6911 pFpuCtx->FSW = fFsw;
6912
6913 iemFpuRotateStackPush(pFpuCtx);
6914}
6915
6916
6917/**
6918 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6919 * FOP.
6920 *
6921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6922 * @param pResult The result to store.
6923 * @param iStReg Which FPU register to store it in.
6924 */
6925IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6926{
6927 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6928 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6929 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6930 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6931}
6932
6933
6934/**
6935 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6936 * FOP, and then pops the stack.
6937 *
6938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6939 * @param pResult The result to store.
6940 * @param iStReg Which FPU register to store it in.
6941 */
6942IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6943{
6944 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6945 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6946 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6947 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6948 iemFpuMaybePopOne(pFpuCtx);
6949}
6950
6951
6952/**
6953 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6954 * FPUDP, and FPUDS.
6955 *
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 * @param pResult The result to store.
6958 * @param iStReg Which FPU register to store it in.
6959 * @param iEffSeg The effective memory operand selector register.
6960 * @param GCPtrEff The effective memory operand offset.
6961 */
6962IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6963 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6964{
6965 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6966 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6967 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6968 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6969 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6970}
6971
6972
6973/**
6974 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6975 * FPUDP, and FPUDS, and then pops the stack.
6976 *
6977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6978 * @param pResult The result to store.
6979 * @param iStReg Which FPU register to store it in.
6980 * @param iEffSeg The effective memory operand selector register.
6981 * @param GCPtrEff The effective memory operand offset.
6982 */
6983IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6984 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6985{
6986 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6987 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6988 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6989 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6990 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6991 iemFpuMaybePopOne(pFpuCtx);
6992}
6993
6994
6995/**
6996 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7001{
7002 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7003 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7004 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7005}
7006
7007
7008/**
7009 * Marks the specified stack register as free (for FFREE).
7010 *
7011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7012 * @param iStReg The register to free.
7013 */
7014IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7015{
7016 Assert(iStReg < 8);
7017 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7018 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7019 pFpuCtx->FTW &= ~RT_BIT(iReg);
7020}
7021
7022
7023/**
7024 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7025 *
7026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7027 */
7028IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7029{
7030 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7031 uint16_t uFsw = pFpuCtx->FSW;
7032 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7033 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7034 uFsw &= ~X86_FSW_TOP_MASK;
7035 uFsw |= uTop;
7036 pFpuCtx->FSW = uFsw;
7037}
7038
7039
7040/**
7041 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7042 *
7043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7044 */
7045IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7046{
7047 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7048 uint16_t uFsw = pFpuCtx->FSW;
7049 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7050 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7051 uFsw &= ~X86_FSW_TOP_MASK;
7052 uFsw |= uTop;
7053 pFpuCtx->FSW = uFsw;
7054}
7055
7056
7057/**
7058 * Updates the FSW, FOP, FPUIP, and FPUCS.
7059 *
7060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7061 * @param u16FSW The FSW from the current instruction.
7062 */
7063IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7064{
7065 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7066 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7067 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7068 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7069}
7070
7071
7072/**
7073 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7074 *
7075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7076 * @param u16FSW The FSW from the current instruction.
7077 */
7078IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7079{
7080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7081 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7082 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7083 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7084 iemFpuMaybePopOne(pFpuCtx);
7085}
7086
7087
7088/**
7089 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7090 *
7091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7092 * @param u16FSW The FSW from the current instruction.
7093 * @param iEffSeg The effective memory operand selector register.
7094 * @param GCPtrEff The effective memory operand offset.
7095 */
7096IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7097{
7098 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7099 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7100 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7101 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7102 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7103}
7104
7105
7106/**
7107 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7108 *
7109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7110 * @param u16FSW The FSW from the current instruction.
7111 */
7112IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7113{
7114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7115 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7116 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7117 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7118 iemFpuMaybePopOne(pFpuCtx);
7119 iemFpuMaybePopOne(pFpuCtx);
7120}
7121
7122
7123/**
7124 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7125 *
7126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7127 * @param u16FSW The FSW from the current instruction.
7128 * @param iEffSeg The effective memory operand selector register.
7129 * @param GCPtrEff The effective memory operand offset.
7130 */
7131IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7132{
7133 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7134 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7135 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7136 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7137 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7138 iemFpuMaybePopOne(pFpuCtx);
7139}
7140
7141
7142/**
7143 * Worker routine for raising an FPU stack underflow exception.
7144 *
7145 * @param pFpuCtx The FPU context.
7146 * @param iStReg The stack register being accessed.
7147 */
7148IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7149{
7150 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7151 if (pFpuCtx->FCW & X86_FCW_IM)
7152 {
7153 /* Masked underflow. */
7154 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7155 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7156 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7157 if (iStReg != UINT8_MAX)
7158 {
7159 pFpuCtx->FTW |= RT_BIT(iReg);
7160 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7161 }
7162 }
7163 else
7164 {
7165 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7166 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7167 }
7168}
7169
7170
7171/**
7172 * Raises a FPU stack underflow exception.
7173 *
7174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7175 * @param iStReg The destination register that should be loaded
7176 * with QNaN if \#IS is not masked. Specify
7177 * UINT8_MAX if none (like for fcom).
7178 */
7179DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7180{
7181 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7182 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7183 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7184 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7185}
7186
7187
7188DECL_NO_INLINE(IEM_STATIC, void)
7189iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7190{
7191 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7192 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7193 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7194 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7195 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7196}
7197
7198
7199DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7200{
7201 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7202 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7203 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7204 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7205 iemFpuMaybePopOne(pFpuCtx);
7206}
7207
7208
7209DECL_NO_INLINE(IEM_STATIC, void)
7210iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7211{
7212 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7213 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7214 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7215 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7216 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7217 iemFpuMaybePopOne(pFpuCtx);
7218}
7219
7220
7221DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7222{
7223 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7224 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7225 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7226 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7227 iemFpuMaybePopOne(pFpuCtx);
7228 iemFpuMaybePopOne(pFpuCtx);
7229}
7230
7231
7232DECL_NO_INLINE(IEM_STATIC, void)
7233iemFpuStackPushUnderflow(PVMCPU pVCpu)
7234{
7235 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7236 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7237 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7238
7239 if (pFpuCtx->FCW & X86_FCW_IM)
7240 {
7241 /* Masked underflow - Push QNaN. */
7242 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7243 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7244 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7245 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7246 pFpuCtx->FTW |= RT_BIT(iNewTop);
7247 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7248 iemFpuRotateStackPush(pFpuCtx);
7249 }
7250 else
7251 {
7252 /* Exception pending - don't change TOP or the register stack. */
7253 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7254 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7255 }
7256}
7257
7258
7259DECL_NO_INLINE(IEM_STATIC, void)
7260iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7261{
7262 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7263 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7264 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7265
7266 if (pFpuCtx->FCW & X86_FCW_IM)
7267 {
7268 /* Masked underflow - Push QNaN. */
7269 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7270 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7271 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7272 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7273 pFpuCtx->FTW |= RT_BIT(iNewTop);
7274 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7275 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7276 iemFpuRotateStackPush(pFpuCtx);
7277 }
7278 else
7279 {
7280 /* Exception pending - don't change TOP or the register stack. */
7281 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7282 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7283 }
7284}
7285
7286
7287/**
7288 * Worker routine for raising an FPU stack overflow exception on a push.
7289 *
7290 * @param pFpuCtx The FPU context.
7291 */
7292IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7293{
7294 if (pFpuCtx->FCW & X86_FCW_IM)
7295 {
7296 /* Masked overflow. */
7297 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7298 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7299 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7300 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7301 pFpuCtx->FTW |= RT_BIT(iNewTop);
7302 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7303 iemFpuRotateStackPush(pFpuCtx);
7304 }
7305 else
7306 {
7307 /* Exception pending - don't change TOP or the register stack. */
7308 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7309 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7310 }
7311}
7312
7313
7314/**
7315 * Raises a FPU stack overflow exception on a push.
7316 *
7317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7318 */
7319DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7320{
7321 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7322 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7323 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7324 iemFpuStackPushOverflowOnly(pFpuCtx);
7325}
7326
7327
7328/**
7329 * Raises a FPU stack overflow exception on a push with a memory operand.
7330 *
7331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7332 * @param iEffSeg The effective memory operand selector register.
7333 * @param GCPtrEff The effective memory operand offset.
7334 */
7335DECL_NO_INLINE(IEM_STATIC, void)
7336iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7337{
7338 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7339 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7340 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7341 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7342 iemFpuStackPushOverflowOnly(pFpuCtx);
7343}
7344
7345
7346IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7347{
7348 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7349 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7350 if (pFpuCtx->FTW & RT_BIT(iReg))
7351 return VINF_SUCCESS;
7352 return VERR_NOT_FOUND;
7353}
7354
7355
7356IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7357{
7358 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7359 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7360 if (pFpuCtx->FTW & RT_BIT(iReg))
7361 {
7362 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7363 return VINF_SUCCESS;
7364 }
7365 return VERR_NOT_FOUND;
7366}
7367
7368
7369IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7370 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7371{
7372 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7373 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7374 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7375 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7376 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7377 {
7378 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7379 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7380 return VINF_SUCCESS;
7381 }
7382 return VERR_NOT_FOUND;
7383}
7384
7385
7386IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7387{
7388 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7389 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7390 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7391 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7392 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7393 {
7394 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7395 return VINF_SUCCESS;
7396 }
7397 return VERR_NOT_FOUND;
7398}
7399
7400
7401/**
7402 * Updates the FPU exception status after FCW is changed.
7403 *
7404 * @param pFpuCtx The FPU context.
7405 */
7406IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7407{
7408 uint16_t u16Fsw = pFpuCtx->FSW;
7409 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7410 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7411 else
7412 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7413 pFpuCtx->FSW = u16Fsw;
7414}
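
/*
 * Worked example (illustration only): with FSW=0x0001 (IE pending) and
 * FCW=0x037e (IM clear, the other exceptions masked), IE is unmasked, so ES
 * and B get set and FSW becomes 0x8081; masking IE again (FCW=0x037f) makes
 * the same call clear ES and B, leaving FSW=0x0001.
 */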
7415
7416
7417/**
7418 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7419 *
7420 * @returns The full FTW.
7421 * @param pFpuCtx The FPU context.
7422 */
7423IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7424{
7425 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7426 uint16_t u16Ftw = 0;
7427 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7428 for (unsigned iSt = 0; iSt < 8; iSt++)
7429 {
7430 unsigned const iReg = (iSt + iTop) & 7;
7431 if (!(u8Ftw & RT_BIT(iReg)))
7432 u16Ftw |= 3 << (iReg * 2); /* empty */
7433 else
7434 {
7435 uint16_t uTag;
7436 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7437 if (pr80Reg->s.uExponent == 0x7fff)
7438 uTag = 2; /* Exponent is all 1's => Special. */
7439 else if (pr80Reg->s.uExponent == 0x0000)
7440 {
7441 if (pr80Reg->s.u64Mantissa == 0x0000)
7442 uTag = 1; /* All bits are zero => Zero. */
7443 else
7444 uTag = 2; /* Must be special. */
7445 }
7446 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7447 uTag = 0; /* Valid. */
7448 else
7449 uTag = 2; /* Must be special. */
7450
7451 u16Ftw |= uTag << (iReg * 2);
7452 }
7453 }
7454
7455 return u16Ftw;
7456}
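
/*
 * Worked example (illustration only): with TOP=7 and only ST(0) holding a
 * normal value, the abbreviated FTW has just bit 7 set (physical register 7 is
 * TOP+0).  The loop above emits tag 0 (valid) for register 7 and tag 3 (empty)
 * for the other seven, giving a full tag word of 0x3fff.
 */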
7457
7458
7459/**
7460 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7461 *
7462 * @returns The compressed FTW.
7463 * @param u16FullFtw The full FTW to convert.
7464 */
7465IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7466{
7467 uint8_t u8Ftw = 0;
7468 for (unsigned i = 0; i < 8; i++)
7469 {
7470 if ((u16FullFtw & 3) != 3 /*empty*/)
7471 u8Ftw |= RT_BIT(i);
7472 u16FullFtw >>= 2;
7473 }
7474
7475 return u8Ftw;
7476}
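
/*
 * Worked example (illustration only): compressing the full tag word 0x3fff
 * from the example above yields 0x80, since every 2-bit pair except the one
 * for physical register 7 reads 3 (empty); an all-empty 0xffff compresses to
 * 0x00.
 */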
7477
7478/** @} */
7479
7480
7481/** @name Memory access.
7482 *
7483 * @{
7484 */
7485
7486
7487/**
7488 * Updates the IEMCPU::cbWritten counter if applicable.
7489 *
7490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7491 * @param fAccess The access being accounted for.
7492 * @param cbMem The access size.
7493 */
7494DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7495{
7496 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7497 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7498 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7499}
7500
7501
7502/**
7503 * Checks if the given segment can be written to, raising the appropriate
7504 * exception if not.
7505 *
7506 * @returns VBox strict status code.
7507 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 * @param pHid Pointer to the hidden register.
7510 * @param iSegReg The register number.
7511 * @param pu64BaseAddr Where to return the base address to use for the
7512 * segment. (In 64-bit code it may differ from the
7513 * base in the hidden segment.)
7514 */
7515IEM_STATIC VBOXSTRICTRC
7516iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7517{
7518 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7519 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7520 else
7521 {
7522 if (!pHid->Attr.n.u1Present)
7523 {
7524 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7525 AssertRelease(uSel == 0);
7526 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7527 return iemRaiseGeneralProtectionFault0(pVCpu);
7528 }
7529
7530 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7531 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7532 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7533 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7534 *pu64BaseAddr = pHid->u64Base;
7535 }
7536 return VINF_SUCCESS;
7537}
7538
7539
7540/**
7541 * Checks if the given segment can be read from, raising the appropriate
7542 * exception if not.
7543 *
7544 * @returns VBox strict status code.
7545 *
7546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7547 * @param pHid Pointer to the hidden register.
7548 * @param iSegReg The register number.
7549 * @param pu64BaseAddr Where to return the base address to use for the
7550 * segment. (In 64-bit code it may differ from the
7551 * base in the hidden segment.)
7552 */
7553IEM_STATIC VBOXSTRICTRC
7554iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7555{
7556 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7557 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7558 else
7559 {
7560 if (!pHid->Attr.n.u1Present)
7561 {
7562 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7563 AssertRelease(uSel == 0);
7564 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7565 return iemRaiseGeneralProtectionFault0(pVCpu);
7566 }
7567
7568 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7569 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7570 *pu64BaseAddr = pHid->u64Base;
7571 }
7572 return VINF_SUCCESS;
7573}
7574
7575
7576/**
7577 * Applies the segment limit, base and attributes.
7578 *
7579 * This may raise a \#GP or \#SS.
7580 *
7581 * @returns VBox strict status code.
7582 *
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param fAccess The kind of access which is being performed.
7585 * @param iSegReg The index of the segment register to apply.
7586 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7587 * TSS, ++).
7588 * @param cbMem The access size.
7589 * @param pGCPtrMem Pointer to the guest memory address to apply
7590 * segmentation to. Input and output parameter.
7591 */
7592IEM_STATIC VBOXSTRICTRC
7593iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7594{
7595 if (iSegReg == UINT8_MAX)
7596 return VINF_SUCCESS;
7597
7598 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7599 switch (pVCpu->iem.s.enmCpuMode)
7600 {
7601 case IEMMODE_16BIT:
7602 case IEMMODE_32BIT:
7603 {
7604 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7605 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7606
7607 if ( pSel->Attr.n.u1Present
7608 && !pSel->Attr.n.u1Unusable)
7609 {
7610 Assert(pSel->Attr.n.u1DescType);
7611 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7612 {
7613 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7614 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7615 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7616
7617 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7618 {
7619 /** @todo CPL check. */
7620 }
7621
7622 /*
7623 * There are two kinds of data selectors, normal and expand down.
7624 */
7625 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7626 {
7627 if ( GCPtrFirst32 > pSel->u32Limit
7628 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7629 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7630 }
7631 else
7632 {
7633 /*
7634 * The upper boundary is defined by the B bit, not the G bit!
7635 */
7636 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7637 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7638 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7639 }
7640 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7641 }
7642 else
7643 {
7644
7645 /*
7646 * A code selector can usually be used to read through; writing is
7647 * only permitted in real and V8086 mode.
7648 */
7649 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7650 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7651 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7652 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7653 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7654
7655 if ( GCPtrFirst32 > pSel->u32Limit
7656 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7657 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7658
7659 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7660 {
7661 /** @todo CPL check. */
7662 }
7663
7664 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7665 }
7666 }
7667 else
7668 return iemRaiseGeneralProtectionFault0(pVCpu);
7669 return VINF_SUCCESS;
7670 }
7671
7672 case IEMMODE_64BIT:
7673 {
7674 RTGCPTR GCPtrMem = *pGCPtrMem;
7675 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7676 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7677
7678 Assert(cbMem >= 1);
7679 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7680 return VINF_SUCCESS;
7681 return iemRaiseGeneralProtectionFault0(pVCpu);
7682 }
7683
7684 default:
7685 AssertFailedReturn(VERR_IEM_IPE_7);
7686 }
7687}
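
/*
 * Worked example for the expand-down branch above (illustration only): for a
 * 16-bit expand-down data segment with limit 0x0fff and B=0, the valid offsets
 * are 0x1000..0xffff.  A 2-byte access at 0x0fff faults because its first byte
 * lies at or below the limit, while one at 0x1000 passes and gets the segment
 * base added.
 */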
7688
7689
7690/**
7691 * Translates a virtual address to a physical address and checks if we
7692 * can access the page as specified.
7693 *
7694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7695 * @param GCPtrMem The virtual address.
7696 * @param fAccess The intended access.
7697 * @param pGCPhysMem Where to return the physical address.
7698 */
7699IEM_STATIC VBOXSTRICTRC
7700iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7701{
7702 /** @todo Need a different PGM interface here. We're currently using
7703 * generic / REM interfaces. This won't cut it for R0 & RC. */
7704 RTGCPHYS GCPhys;
7705 uint64_t fFlags;
7706 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7707 if (RT_FAILURE(rc))
7708 {
7709 /** @todo Check unassigned memory in unpaged mode. */
7710 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7711 *pGCPhysMem = NIL_RTGCPHYS;
7712 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7713 }
7714
7715 /* If the page is writable and does not have the no-exec bit set, all
7716 access is allowed. Otherwise we'll have to check more carefully... */
7717 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7718 {
7719 /* Write to read only memory? */
7720 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7721 && !(fFlags & X86_PTE_RW)
7722 && ( (pVCpu->iem.s.uCpl == 3
7723 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7724 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7725 {
7726 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7727 *pGCPhysMem = NIL_RTGCPHYS;
7728 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7729 }
7730
7731 /* Kernel memory accessed by userland? */
7732 if ( !(fFlags & X86_PTE_US)
7733 && pVCpu->iem.s.uCpl == 3
7734 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7735 {
7736 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7737 *pGCPhysMem = NIL_RTGCPHYS;
7738 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7739 }
7740
7741 /* Executing non-executable memory? */
7742 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7743 && (fFlags & X86_PTE_PAE_NX)
7744 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7745 {
7746 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7747 *pGCPhysMem = NIL_RTGCPHYS;
7748 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7749 VERR_ACCESS_DENIED);
7750 }
7751 }
7752
7753 /*
7754 * Set the dirty / access flags.
7755 * ASSUMES this is set when the address is translated rather than on commit...
7756 */
7757 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7758 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7759 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7760 {
7761 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7762 AssertRC(rc2);
7763 }
7764
7765 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7766 *pGCPhysMem = GCPhys;
7767 return VINF_SUCCESS;
7768}
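
/*
 * Quick summary of the checks above (descriptive only): a ring-3 write to a
 * page without X86_PTE_RW faults, as does a supervisor write when CR0.WP is
 * set; a ring-3 access to a supervisor page (X86_PTE_US clear) faults; and an
 * instruction fetch from a page with the NX bit faults when EFER.NXE is set.
 * Accesses flagged IEM_ACCESS_WHAT_SYS are not treated as user accesses by the
 * first two checks.
 */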
7769
7770
7771
7772/**
7773 * Maps a physical page.
7774 *
7775 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7777 * @param GCPhysMem The physical address.
7778 * @param fAccess The intended access.
7779 * @param ppvMem Where to return the mapping address.
7780 * @param pLock The PGM lock.
7781 */
7782IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7783{
7784#ifdef IEM_VERIFICATION_MODE_FULL
7785 /* Force the alternative path so we can ignore writes. */
7786 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7787 {
7788 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7789 {
7790 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7791 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7792 if (RT_FAILURE(rc2))
7793 pVCpu->iem.s.fProblematicMemory = true;
7794 }
7795 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7796 }
7797#endif
7798#ifdef IEM_LOG_MEMORY_WRITES
7799 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7800 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7801#endif
7802#ifdef IEM_VERIFICATION_MODE_MINIMAL
7803 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7804#endif
7805
7806 /** @todo This API may require some improvement later. A private deal with PGM
7807 * regarding locking and unlocking needs to be struck. A couple of TLBs
7808 * living in PGM, but with publicly accessible inlined access methods,
7809 * could perhaps be an even better solution. */
7810 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7811 GCPhysMem,
7812 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7813 pVCpu->iem.s.fBypassHandlers,
7814 ppvMem,
7815 pLock);
7816 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7817 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7818
7819#ifdef IEM_VERIFICATION_MODE_FULL
7820 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7821 pVCpu->iem.s.fProblematicMemory = true;
7822#endif
7823 return rc;
7824}
7825
7826
7827/**
7828 * Unmaps a page previously mapped by iemMemPageMap.
7829 *
7830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7831 * @param GCPhysMem The physical address.
7832 * @param fAccess The intended access.
7833 * @param pvMem What iemMemPageMap returned.
7834 * @param pLock The PGM lock.
7835 */
7836DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7837{
7838 NOREF(pVCpu);
7839 NOREF(GCPhysMem);
7840 NOREF(fAccess);
7841 NOREF(pvMem);
7842 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7843}
7844
7845
7846/**
7847 * Looks up a memory mapping entry.
7848 *
7849 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7851 * @param pvMem The memory address.
7852 * @param fAccess The access flags to match.
7853 */
7854DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7855{
7856 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7857 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7858 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7859 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7860 return 0;
7861 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7862 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7863 return 1;
7864 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7865 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7866 return 2;
7867 return VERR_NOT_FOUND;
7868}
7869
7870
7871/**
7872 * Finds a free memmap entry when using iNextMapping doesn't work.
7873 *
7874 * @returns Memory mapping index, 1024 on failure.
7875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7876 */
7877IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7878{
7879 /*
7880 * The easy case.
7881 */
7882 if (pVCpu->iem.s.cActiveMappings == 0)
7883 {
7884 pVCpu->iem.s.iNextMapping = 1;
7885 return 0;
7886 }
7887
7888 /* There should be enough mappings for all instructions. */
7889 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7890
7891 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7892 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7893 return i;
7894
7895 AssertFailedReturn(1024);
7896}
7897
7898
7899/**
7900 * Commits a bounce buffer that needs writing back and unmaps it.
7901 *
7902 * @returns Strict VBox status code.
7903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7904 * @param iMemMap The index of the buffer to commit.
7905 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
7906 * Always false in ring-3, obviously.
7907 */
7908IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7909{
7910 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7911 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7912#ifdef IN_RING3
7913 Assert(!fPostponeFail);
7914 RT_NOREF_PV(fPostponeFail);
7915#endif
7916
7917 /*
7918 * Do the writing.
7919 */
7920#ifndef IEM_VERIFICATION_MODE_MINIMAL
7921 PVM pVM = pVCpu->CTX_SUFF(pVM);
7922 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7923 && !IEM_VERIFICATION_ENABLED(pVCpu))
7924 {
7925 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7926 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7927 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7928 if (!pVCpu->iem.s.fBypassHandlers)
7929 {
7930 /*
7931 * Carefully and efficiently dealing with access handler return
7932 * codes make this a little bloated.
7933 */
7934 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7935 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7936 pbBuf,
7937 cbFirst,
7938 PGMACCESSORIGIN_IEM);
7939 if (rcStrict == VINF_SUCCESS)
7940 {
7941 if (cbSecond)
7942 {
7943 rcStrict = PGMPhysWrite(pVM,
7944 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7945 pbBuf + cbFirst,
7946 cbSecond,
7947 PGMACCESSORIGIN_IEM);
7948 if (rcStrict == VINF_SUCCESS)
7949 { /* nothing */ }
7950 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7951 {
7952 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7954 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7955 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7956 }
7957# ifndef IN_RING3
7958 else if (fPostponeFail)
7959 {
7960 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7961 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7963 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7964 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7965 return iemSetPassUpStatus(pVCpu, rcStrict);
7966 }
7967# endif
7968 else
7969 {
7970 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7971 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7972 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7973 return rcStrict;
7974 }
7975 }
7976 }
7977 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7978 {
7979 if (!cbSecond)
7980 {
7981 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7982 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7983 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7984 }
7985 else
7986 {
7987 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7988 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7989 pbBuf + cbFirst,
7990 cbSecond,
7991 PGMACCESSORIGIN_IEM);
7992 if (rcStrict2 == VINF_SUCCESS)
7993 {
7994 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7995 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7996 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7997 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7998 }
7999 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8000 {
8001 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8002 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8003 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8004 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8005 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8006 }
8007# ifndef IN_RING3
8008 else if (fPostponeFail)
8009 {
8010 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8011 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8012 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8013 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8014 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8015 return iemSetPassUpStatus(pVCpu, rcStrict);
8016 }
8017# endif
8018 else
8019 {
8020 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8021 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8022 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8023 return rcStrict2;
8024 }
8025 }
8026 }
8027# ifndef IN_RING3
8028 else if (fPostponeFail)
8029 {
8030 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8031 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8032 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8033 if (!cbSecond)
8034 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8035 else
8036 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8037 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8038 return iemSetPassUpStatus(pVCpu, rcStrict);
8039 }
8040# endif
8041 else
8042 {
8043 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8044 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8045 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8046 return rcStrict;
8047 }
8048 }
8049 else
8050 {
8051 /*
8052 * No access handlers, much simpler.
8053 */
8054 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8055 if (RT_SUCCESS(rc))
8056 {
8057 if (cbSecond)
8058 {
8059 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8060 if (RT_SUCCESS(rc))
8061 { /* likely */ }
8062 else
8063 {
8064 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8065 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8066 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8067 return rc;
8068 }
8069 }
8070 }
8071 else
8072 {
8073 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8074 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8075 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8076 return rc;
8077 }
8078 }
8079 }
8080#endif
8081
8082#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8083 /*
8084 * Record the write(s).
8085 */
8086 if (!pVCpu->iem.s.fNoRem)
8087 {
8088 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8089 if (pEvtRec)
8090 {
8091 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8092 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8093 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8094 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8095 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8096 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8097 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8098 }
8099 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8100 {
8101 pEvtRec = iemVerifyAllocRecord(pVCpu);
8102 if (pEvtRec)
8103 {
8104 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8105 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8106 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8107 memcpy(pEvtRec->u.RamWrite.ab,
8108 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8109 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8110 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8111 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8112 }
8113 }
8114 }
8115#endif
8116#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8117 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8118 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8119 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8120 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8121 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8122 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8123
8124 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8125 g_cbIemWrote = cbWrote;
8126 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8127#endif
8128
8129 /*
8130 * Free the mapping entry.
8131 */
8132 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8133 Assert(pVCpu->iem.s.cActiveMappings != 0);
8134 pVCpu->iem.s.cActiveMappings--;
8135 return VINF_SUCCESS;
8136}
8137
8138
8139/**
8140 * iemMemMap worker that deals with a request crossing pages.
8141 */
8142IEM_STATIC VBOXSTRICTRC
8143iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8144{
8145 /*
8146 * Do the address translations.
8147 */
8148 RTGCPHYS GCPhysFirst;
8149 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8150 if (rcStrict != VINF_SUCCESS)
8151 return rcStrict;
8152
8153 RTGCPHYS GCPhysSecond;
8154 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8155 fAccess, &GCPhysSecond);
8156 if (rcStrict != VINF_SUCCESS)
8157 return rcStrict;
8158 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8159
8160 PVM pVM = pVCpu->CTX_SUFF(pVM);
8161#ifdef IEM_VERIFICATION_MODE_FULL
8162 /*
8163 * Detect problematic memory when verifying so we can select
8164 * the right execution engine. (TLB: Redo this.)
8165 */
8166 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8167 {
8168 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8169 if (RT_SUCCESS(rc2))
8170 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8171 if (RT_FAILURE(rc2))
8172 pVCpu->iem.s.fProblematicMemory = true;
8173 }
8174#endif
8175
8176
8177 /*
8178 * Read in the current memory content if it's a read, execute or partial
8179 * write access.
8180 */
8181 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8182 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8183 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8184
8185 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8186 {
8187 if (!pVCpu->iem.s.fBypassHandlers)
8188 {
8189 /*
8190 * Must carefully deal with access handler status codes here,
8191 * makes the code a bit bloated.
8192 * which makes the code a bit bloated.
8193 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8194 if (rcStrict == VINF_SUCCESS)
8195 {
8196 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8197 if (rcStrict == VINF_SUCCESS)
8198 { /*likely */ }
8199 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8200 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8201 else
8202 {
8203 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8204 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8205 return rcStrict;
8206 }
8207 }
8208 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8209 {
8210 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8211 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8212 {
8213 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8214 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8215 }
8216 else
8217 {
8218 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8219 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8220 return rcStrict2;
8221 }
8222 }
8223 else
8224 {
8225 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8226 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8227 return rcStrict;
8228 }
8229 }
8230 else
8231 {
8232 /*
8233 * No informational status codes here, much more straightforward.
8234 */
8235 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8236 if (RT_SUCCESS(rc))
8237 {
8238 Assert(rc == VINF_SUCCESS);
8239 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8240 if (RT_SUCCESS(rc))
8241 Assert(rc == VINF_SUCCESS);
8242 else
8243 {
8244 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8245 return rc;
8246 }
8247 }
8248 else
8249 {
8250 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8251 return rc;
8252 }
8253 }
8254
8255#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8256 if ( !pVCpu->iem.s.fNoRem
8257 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8258 {
8259 /*
8260 * Record the reads.
8261 */
8262 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8263 if (pEvtRec)
8264 {
8265 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8266 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8267 pEvtRec->u.RamRead.cb = cbFirstPage;
8268 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8269 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8270 }
8271 pEvtRec = iemVerifyAllocRecord(pVCpu);
8272 if (pEvtRec)
8273 {
8274 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8275 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8276 pEvtRec->u.RamRead.cb = cbSecondPage;
8277 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8278 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8279 }
8280 }
8281#endif
8282 }
8283#ifdef VBOX_STRICT
8284 else
8285 memset(pbBuf, 0xcc, cbMem);
8286 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8287 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8288#endif
8289
8290 /*
8291 * Commit the bounce buffer entry.
8292 */
8293 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8295 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8296 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8297 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8298 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8299 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8300 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8301 pVCpu->iem.s.cActiveMappings++;
8302
8303 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8304 *ppvMem = pbBuf;
8305 return VINF_SUCCESS;
8306}
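
/*
 * Worked example of the page split above (illustrative numbers only): a
 * 4 byte access at a guest address whose physical page offset is 0xffe
 * gives cbFirstPage = PAGE_SIZE - 0xffe = 2 and cbSecondPage = cbMem -
 * cbFirstPage = 2, so the first two bytes go via GCPhysFirst and the
 * remaining two via GCPhysSecond.
 */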
8307
8308
8309/**
8310 * iemMemMap worker that deals with iemMemPageMap failures.
8311 */
8312IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8313 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8314{
8315 /*
8316 * Filter out conditions we can handle and the ones which shouldn't happen.
8317 */
8318 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8319 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8320 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8321 {
8322 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8323 return rcMap;
8324 }
8325 pVCpu->iem.s.cPotentialExits++;
8326
8327 /*
8328 * Read in the current memory content if it's a read, execute or partial
8329 * write access.
8330 */
8331 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8332 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8333 {
8334 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8335 memset(pbBuf, 0xff, cbMem);
8336 else
8337 {
8338 int rc;
8339 if (!pVCpu->iem.s.fBypassHandlers)
8340 {
8341 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8342 if (rcStrict == VINF_SUCCESS)
8343 { /* nothing */ }
8344 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8345 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8346 else
8347 {
8348 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8349 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8350 return rcStrict;
8351 }
8352 }
8353 else
8354 {
8355 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8356 if (RT_SUCCESS(rc))
8357 { /* likely */ }
8358 else
8359 {
8360 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8361 GCPhysFirst, rc));
8362 return rc;
8363 }
8364 }
8365 }
8366
8367#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8368 if ( !pVCpu->iem.s.fNoRem
8369 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8370 {
8371 /*
8372 * Record the read.
8373 */
8374 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8375 if (pEvtRec)
8376 {
8377 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8378 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8379 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8380 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8381 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8382 }
8383 }
8384#endif
8385 }
8386#ifdef VBOX_STRICT
8387 else
8388 memset(pbBuf, 0xcc, cbMem);
8389#endif
8390#ifdef VBOX_STRICT
8391 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8392 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8393#endif
8394
8395 /*
8396 * Commit the bounce buffer entry.
8397 */
8398 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8403 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8404 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8405 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8406 pVCpu->iem.s.cActiveMappings++;
8407
8408 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8409 *ppvMem = pbBuf;
8410 return VINF_SUCCESS;
8411}
8412
8413
8414
8415/**
8416 * Maps the specified guest memory for the given kind of access.
8417 *
8418 * This may be using bounce buffering of the memory if it's crossing a page
8419 * boundary or if there is an access handler installed for any of it. Because
8420 * of lock prefix guarantees, we're in for some extra clutter when this
8421 * happens.
8422 *
8423 * This may raise a \#GP, \#SS, \#PF or \#AC.
8424 *
8425 * @returns VBox strict status code.
8426 *
8427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8428 * @param ppvMem Where to return the pointer to the mapped
8429 * memory.
8430 * @param cbMem The number of bytes to map. This is usually 1,
8431 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8432 * string operations it can be up to a page.
8433 * @param iSegReg The index of the segment register to use for
8434 * this access. The base and limits are checked.
8435 * Use UINT8_MAX to indicate that no segmentation
8436 * is required (for IDT, GDT and LDT accesses).
8437 * @param GCPtrMem The address of the guest memory.
8438 * @param fAccess How the memory is being accessed. The
8439 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8440 * how to map the memory, while the
8441 * IEM_ACCESS_WHAT_XXX bit is used when raising
8442 * exceptions.
8443 */
8444IEM_STATIC VBOXSTRICTRC
8445iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8446{
8447 /*
8448 * Check the input and figure out which mapping entry to use.
8449 */
8450 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8451 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8452 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8453
8454 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8455 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8456 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8457 {
8458 iMemMap = iemMemMapFindFree(pVCpu);
8459 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8460 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8461 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8462 pVCpu->iem.s.aMemMappings[2].fAccess),
8463 VERR_IEM_IPE_9);
8464 }
8465
8466 /*
8467 * Map the memory, checking that we can actually access it. If something
8468 * slightly complicated happens, fall back on bounce buffering.
8469 */
8470 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8471 if (rcStrict != VINF_SUCCESS)
8472 return rcStrict;
8473
8474 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8475 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8476
8477 RTGCPHYS GCPhysFirst;
8478 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8479 if (rcStrict != VINF_SUCCESS)
8480 return rcStrict;
8481
8482 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8483 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8484 if (fAccess & IEM_ACCESS_TYPE_READ)
8485 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8486
8487 void *pvMem;
8488 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8489 if (rcStrict != VINF_SUCCESS)
8490 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8491
8492 /*
8493 * Fill in the mapping table entry.
8494 */
8495 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8496 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8497 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8498 pVCpu->iem.s.cActiveMappings++;
8499
8500 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8501 *ppvMem = pvMem;
8502 return VINF_SUCCESS;
8503}
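
/*
 * Illustrative sketch of the expected calling pattern (hypothetical helper,
 * not part of the original source): map, modify the memory, then commit and
 * unmap.  The IEM_ACCESS_DATA_RW flag combination used elsewhere in IEM is
 * assumed here; the fetch/store helpers further down follow the same shape
 * with IEM_ACCESS_DATA_R / IEM_ACCESS_DATA_W.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemMemExampleOrU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fOrMask)
{
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst |= fOrMask;    /* Works the same whether pu32Dst is a direct mapping or a bounce buffer. */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif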
8504
8505
8506/**
8507 * Commits the guest memory if bounce buffered and unmaps it.
8508 *
8509 * @returns Strict VBox status code.
8510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8511 * @param pvMem The mapping.
8512 * @param fAccess The kind of access.
8513 */
8514IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8515{
8516 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8517 AssertReturn(iMemMap >= 0, iMemMap);
8518
8519 /* If it's bounce buffered, we may need to write back the buffer. */
8520 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8521 {
8522 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8523 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8524 }
8525 /* Otherwise unlock it. */
8526 else
8527 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8528
8529 /* Free the entry. */
8530 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8531 Assert(pVCpu->iem.s.cActiveMappings != 0);
8532 pVCpu->iem.s.cActiveMappings--;
8533 return VINF_SUCCESS;
8534}
8535
8536#ifdef IEM_WITH_SETJMP
8537
8538/**
8539 * Maps the specified guest memory for the given kind of access, longjmp on
8540 * error.
8541 *
8542 * This may be using bounce buffering of the memory if it's crossing a page
8543 * boundary or if there is an access handler installed for any of it. Because
8544 * of lock prefix guarantees, we're in for some extra clutter when this
8545 * happens.
8546 *
8547 * This may raise a \#GP, \#SS, \#PF or \#AC.
8548 *
8549 * @returns Pointer to the mapped memory.
8550 *
8551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8552 * @param cbMem The number of bytes to map. This is usually 1,
8553 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8554 * string operations it can be up to a page.
8555 * @param iSegReg The index of the segment register to use for
8556 * this access. The base and limits are checked.
8557 * Use UINT8_MAX to indicate that no segmentation
8558 * is required (for IDT, GDT and LDT accesses).
8559 * @param GCPtrMem The address of the guest memory.
8560 * @param fAccess How the memory is being accessed. The
8561 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8562 * how to map the memory, while the
8563 * IEM_ACCESS_WHAT_XXX bit is used when raising
8564 * exceptions.
8565 */
8566IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8567{
8568 /*
8569 * Check the input and figure out which mapping entry to use.
8570 */
8571 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8572 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8573 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8574
8575 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8576 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8577 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8578 {
8579 iMemMap = iemMemMapFindFree(pVCpu);
8580 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8581 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8582 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8583 pVCpu->iem.s.aMemMappings[2].fAccess),
8584 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8585 }
8586
8587 /*
8588 * Map the memory, checking that we can actually access it. If something
8589 * slightly complicated happens, fall back on bounce buffering.
8590 */
8591 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8592 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8593 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8594
8595 /* Crossing a page boundary? */
8596 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8597 { /* No (likely). */ }
8598 else
8599 {
8600 void *pvMem;
8601 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8602 if (rcStrict == VINF_SUCCESS)
8603 return pvMem;
8604 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8605 }
8606
8607 RTGCPHYS GCPhysFirst;
8608 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8609 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8610 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8611
8612 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8613 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8614 if (fAccess & IEM_ACCESS_TYPE_READ)
8615 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8616
8617 void *pvMem;
8618 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8619 if (rcStrict == VINF_SUCCESS)
8620 { /* likely */ }
8621 else
8622 {
8623 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8624 if (rcStrict == VINF_SUCCESS)
8625 return pvMem;
8626 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8627 }
8628
8629 /*
8630 * Fill in the mapping table entry.
8631 */
8632 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8633 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8634 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8635 pVCpu->iem.s.cActiveMappings++;
8636
8637 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8638 return pvMem;
8639}
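
/*
 * Illustrative sketch (hypothetical helper, not part of the original source):
 * with the longjmp variants there is no status code plumbing in the caller,
 * since errors unwind via the pJmpBuf instead.  Compare with the non-jmp
 * pattern used by iemMemFetchDataU8() and friends.
 */
#if 0 /* example only */
IEM_STATIC uint32_t iemMemExampleFetchU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    uint32_t const  u32Ret  = *pu32Src;
    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
    return u32Ret;
}
#endif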
8640
8641
8642/**
8643 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8644 *
8645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8646 * @param pvMem The mapping.
8647 * @param fAccess The kind of access.
8648 */
8649IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8650{
8651 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8652 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8653
8654 /* If it's bounce buffered, we may need to write back the buffer. */
8655 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8656 {
8657 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8658 {
8659 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8660 if (rcStrict == VINF_SUCCESS)
8661 return;
8662 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8663 }
8664 }
8665 /* Otherwise unlock it. */
8666 else
8667 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8668
8669 /* Free the entry. */
8670 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8671 Assert(pVCpu->iem.s.cActiveMappings != 0);
8672 pVCpu->iem.s.cActiveMappings--;
8673}
8674
8675#endif
8676
8677#ifndef IN_RING3
8678/**
8679 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8680 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and records the pending write).
8681 *
8682 * Allows the instruction to be completed and retired, while the IEM user will
8683 * return to ring-3 immediately afterwards and do the postponed writes there.
8684 *
8685 * @returns VBox status code (no strict statuses). Caller must check
8686 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8688 * @param pvMem The mapping.
8689 * @param fAccess The kind of access.
8690 */
8691IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8692{
8693 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8694 AssertReturn(iMemMap >= 0, iMemMap);
8695
8696 /* If it's bounce buffered, we may need to write back the buffer. */
8697 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8698 {
8699 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8700 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8701 }
8702 /* Otherwise unlock it. */
8703 else
8704 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8705
8706 /* Free the entry. */
8707 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8708 Assert(pVCpu->iem.s.cActiveMappings != 0);
8709 pVCpu->iem.s.cActiveMappings--;
8710 return VINF_SUCCESS;
8711}
8712#endif
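
/*
 * Illustrative caller sketch (not part of the original source): after using
 * the postponing variant above the instruction can be retired, but the
 * caller is expected to check VMCPU_FF_IEM before iterating e.g. a string
 * instruction, as stated in the function documentation.  The sketch assumes
 * the usual VMCPU_FF_IS_SET() helper for that test.
 */
#if 0 /* example only */
    rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
    if (   rcStrict == VINF_SUCCESS
        && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    { /* safe to continue with the next string element */ }
    else
    { /* return to ring-3 so the postponed write can be performed there */ }
#endif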
8713
8714
8715/**
8716 * Rolls back mappings, releasing page locks and such.
8717 *
8718 * The caller shall only call this after checking cActiveMappings.
8719 *
8721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8722 */
8723IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8724{
8725 Assert(pVCpu->iem.s.cActiveMappings > 0);
8726
8727 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8728 while (iMemMap-- > 0)
8729 {
8730 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8731 if (fAccess != IEM_ACCESS_INVALID)
8732 {
8733 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8734 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8735 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8736 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8737 Assert(pVCpu->iem.s.cActiveMappings > 0);
8738 pVCpu->iem.s.cActiveMappings--;
8739 }
8740 }
8741}
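
/*
 * Illustrative caller sketch (not part of the original source): the caller
 * side of the contract stated above - only roll back after checking that a
 * failed instruction actually left active mappings behind.
 */
#if 0 /* example only */
    if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
#endif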
8742
8743
8744/**
8745 * Fetches a data byte.
8746 *
8747 * @returns Strict VBox status code.
8748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8749 * @param pu8Dst Where to return the byte.
8750 * @param iSegReg The index of the segment register to use for
8751 * this access. The base and limits are checked.
8752 * @param GCPtrMem The address of the guest memory.
8753 */
8754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8755{
8756 /* The lazy approach for now... */
8757 uint8_t const *pu8Src;
8758 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8759 if (rc == VINF_SUCCESS)
8760 {
8761 *pu8Dst = *pu8Src;
8762 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8763 }
8764 return rc;
8765}
8766
8767
8768#ifdef IEM_WITH_SETJMP
8769/**
8770 * Fetches a data byte, longjmp on error.
8771 *
8772 * @returns The byte.
8773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8774 * @param iSegReg The index of the segment register to use for
8775 * this access. The base and limits are checked.
8776 * @param GCPtrMem The address of the guest memory.
8777 */
8778DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8779{
8780 /* The lazy approach for now... */
8781 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8782 uint8_t const bRet = *pu8Src;
8783 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8784 return bRet;
8785}
8786#endif /* IEM_WITH_SETJMP */
8787
8788
8789/**
8790 * Fetches a data word.
8791 *
8792 * @returns Strict VBox status code.
8793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8794 * @param pu16Dst Where to return the word.
8795 * @param iSegReg The index of the segment register to use for
8796 * this access. The base and limits are checked.
8797 * @param GCPtrMem The address of the guest memory.
8798 */
8799IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8800{
8801 /* The lazy approach for now... */
8802 uint16_t const *pu16Src;
8803 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8804 if (rc == VINF_SUCCESS)
8805 {
8806 *pu16Dst = *pu16Src;
8807 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8808 }
8809 return rc;
8810}
8811
8812
8813#ifdef IEM_WITH_SETJMP
8814/**
8815 * Fetches a data word, longjmp on error.
8816 *
8817 * @returns The word
8818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8819 * @param iSegReg The index of the segment register to use for
8820 * this access. The base and limits are checked.
8821 * @param GCPtrMem The address of the guest memory.
8822 */
8823DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8824{
8825 /* The lazy approach for now... */
8826 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8827 uint16_t const u16Ret = *pu16Src;
8828 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8829 return u16Ret;
8830}
8831#endif
8832
8833
8834/**
8835 * Fetches a data dword.
8836 *
8837 * @returns Strict VBox status code.
8838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8839 * @param pu32Dst Where to return the dword.
8840 * @param iSegReg The index of the segment register to use for
8841 * this access. The base and limits are checked.
8842 * @param GCPtrMem The address of the guest memory.
8843 */
8844IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8845{
8846 /* The lazy approach for now... */
8847 uint32_t const *pu32Src;
8848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8849 if (rc == VINF_SUCCESS)
8850 {
8851 *pu32Dst = *pu32Src;
8852 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8853 }
8854 return rc;
8855}
8856
8857
8858#ifdef IEM_WITH_SETJMP
8859
8860IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8861{
8862 Assert(cbMem >= 1);
8863 Assert(iSegReg < X86_SREG_COUNT);
8864
8865 /*
8866 * 64-bit mode is simpler.
8867 */
8868 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8869 {
8870 if (iSegReg >= X86_SREG_FS)
8871 {
8872 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8873 GCPtrMem += pSel->u64Base;
8874 }
8875
8876 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8877 return GCPtrMem;
8878 }
8879 /*
8880 * 16-bit and 32-bit segmentation.
8881 */
8882 else
8883 {
8884 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8885 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8886 == X86DESCATTR_P /* data, expand up */
8887 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8888 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8889 {
8890 /* expand up */
8891 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8892 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8893 && GCPtrLast32 > (uint32_t)GCPtrMem))
8894 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8895 }
8896 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8897 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8898 {
8899 /* expand down */
8900 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8901 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8902 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8903 && GCPtrLast32 > (uint32_t)GCPtrMem))
8904 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8905 }
8906 else
8907 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8908 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8909 }
8910 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8911}
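
/*
 * Worked example for the expand-down checks above (illustrative numbers
 * only): with u32Limit = 0x0fff and u1DefBig = 0, a 2 byte read at offset
 * 0x1000 gives GCPtrLast32 = 0x1002 and passes all three tests, while the
 * same read at offset 0x0fff fails the "GCPtrMem > u32Limit" test and ends
 * up in iemRaiseSelectorBoundsJmp().
 */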
8912
8913
8914IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8915{
8916 Assert(cbMem >= 1);
8917 Assert(iSegReg < X86_SREG_COUNT);
8918
8919 /*
8920 * 64-bit mode is simpler.
8921 */
8922 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8923 {
8924 if (iSegReg >= X86_SREG_FS)
8925 {
8926 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8927 GCPtrMem += pSel->u64Base;
8928 }
8929
8930 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8931 return GCPtrMem;
8932 }
8933 /*
8934 * 16-bit and 32-bit segmentation.
8935 */
8936 else
8937 {
8938 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8939 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8940 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8941 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8942 {
8943 /* expand up */
8944 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8945 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8946 && GCPtrLast32 > (uint32_t)GCPtrMem))
8947 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8948 }
8949 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8950 {
8951 /* expand down */
8952 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8953 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8954 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8955 && GCPtrLast32 > (uint32_t)GCPtrMem))
8956 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8957 }
8958 else
8959 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8960 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8961 }
8962 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8963}
8964
8965
8966/**
8967 * Fetches a data dword, longjmp on error, fallback/safe version.
8968 *
8969 * @returns The dword
8970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8971 * @param iSegReg The index of the segment register to use for
8972 * this access. The base and limits are checked.
8973 * @param GCPtrMem The address of the guest memory.
8974 */
8975IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8976{
8977 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8978 uint32_t const u32Ret = *pu32Src;
8979 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8980 return u32Ret;
8981}
8982
8983
8984/**
8985 * Fetches a data dword, longjmp on error.
8986 *
8987 * @returns The dword
8988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8989 * @param iSegReg The index of the segment register to use for
8990 * this access. The base and limits are checked.
8991 * @param GCPtrMem The address of the guest memory.
8992 */
8993DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8994{
8995# ifdef IEM_WITH_DATA_TLB
8996 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8997 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8998 {
8999 /// @todo more later.
9000 }
9001
9002 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9003# else
9004 /* The lazy approach. */
9005 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9006 uint32_t const u32Ret = *pu32Src;
9007 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9008 return u32Ret;
9009# endif
9010}
9011#endif
9012
9013
9014#ifdef SOME_UNUSED_FUNCTION
9015/**
9016 * Fetches a data dword and sign extends it to a qword.
9017 *
9018 * @returns Strict VBox status code.
9019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9020 * @param pu64Dst Where to return the sign extended value.
9021 * @param iSegReg The index of the segment register to use for
9022 * this access. The base and limits are checked.
9023 * @param GCPtrMem The address of the guest memory.
9024 */
9025IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9026{
9027 /* The lazy approach for now... */
9028 int32_t const *pi32Src;
9029 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9030 if (rc == VINF_SUCCESS)
9031 {
9032 *pu64Dst = *pi32Src;
9033 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9034 }
9035#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9036 else
9037 *pu64Dst = 0;
9038#endif
9039 return rc;
9040}
9041#endif
9042
9043
9044/**
9045 * Fetches a data qword.
9046 *
9047 * @returns Strict VBox status code.
9048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9049 * @param pu64Dst Where to return the qword.
9050 * @param iSegReg The index of the segment register to use for
9051 * this access. The base and limits are checked.
9052 * @param GCPtrMem The address of the guest memory.
9053 */
9054IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9055{
9056 /* The lazy approach for now... */
9057 uint64_t const *pu64Src;
9058 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9059 if (rc == VINF_SUCCESS)
9060 {
9061 *pu64Dst = *pu64Src;
9062 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9063 }
9064 return rc;
9065}
9066
9067
9068#ifdef IEM_WITH_SETJMP
9069/**
9070 * Fetches a data qword, longjmp on error.
9071 *
9072 * @returns The qword.
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param iSegReg The index of the segment register to use for
9075 * this access. The base and limits are checked.
9076 * @param GCPtrMem The address of the guest memory.
9077 */
9078DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9079{
9080 /* The lazy approach for now... */
9081 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9082 uint64_t const u64Ret = *pu64Src;
9083 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9084 return u64Ret;
9085}
9086#endif
9087
9088
9089/**
9090 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9091 *
9092 * @returns Strict VBox status code.
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9094 * @param pu64Dst Where to return the qword.
9095 * @param iSegReg The index of the segment register to use for
9096 * this access. The base and limits are checked.
9097 * @param GCPtrMem The address of the guest memory.
9098 */
9099IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9100{
9101 /* The lazy approach for now... */
9102 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9103 if (RT_UNLIKELY(GCPtrMem & 15))
9104 return iemRaiseGeneralProtectionFault0(pVCpu);
9105
9106 uint64_t const *pu64Src;
9107 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9108 if (rc == VINF_SUCCESS)
9109 {
9110 *pu64Dst = *pu64Src;
9111 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9112 }
9113 return rc;
9114}
9115
9116
9117#ifdef IEM_WITH_SETJMP
9118/**
9119 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9120 *
9121 * @returns The qword.
9122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9123 * @param iSegReg The index of the segment register to use for
9124 * this access. The base and limits are checked.
9125 * @param GCPtrMem The address of the guest memory.
9126 */
9127DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9128{
9129 /* The lazy approach for now... */
9130 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9131 if (RT_LIKELY(!(GCPtrMem & 15)))
9132 {
9133 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9134 uint64_t const u64Ret = *pu64Src;
9135 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9136 return u64Ret;
9137 }
9138
9139 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9140 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9141}
9142#endif
9143
9144
9145/**
9146 * Fetches a data tword.
9147 *
9148 * @returns Strict VBox status code.
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param pr80Dst Where to return the tword.
9151 * @param iSegReg The index of the segment register to use for
9152 * this access. The base and limits are checked.
9153 * @param GCPtrMem The address of the guest memory.
9154 */
9155IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9156{
9157 /* The lazy approach for now... */
9158 PCRTFLOAT80U pr80Src;
9159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9160 if (rc == VINF_SUCCESS)
9161 {
9162 *pr80Dst = *pr80Src;
9163 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9164 }
9165 return rc;
9166}
9167
9168
9169#ifdef IEM_WITH_SETJMP
9170/**
9171 * Fetches a data tword, longjmp on error.
9172 *
9173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9174 * @param pr80Dst Where to return the tword.
9175 * @param iSegReg The index of the segment register to use for
9176 * this access. The base and limits are checked.
9177 * @param GCPtrMem The address of the guest memory.
9178 */
9179DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9180{
9181 /* The lazy approach for now... */
9182 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9183 *pr80Dst = *pr80Src;
9184 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9185}
9186#endif
9187
9188
9189/**
9190 * Fetches a data dqword (double qword), generally SSE related.
9191 *
9192 * @returns Strict VBox status code.
9193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9194 * @param pu128Dst Where to return the dqword.
9195 * @param iSegReg The index of the segment register to use for
9196 * this access. The base and limits are checked.
9197 * @param GCPtrMem The address of the guest memory.
9198 */
9199IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9200{
9201 /* The lazy approach for now... */
9202 PCRTUINT128U pu128Src;
9203 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9204 if (rc == VINF_SUCCESS)
9205 {
9206 pu128Dst->au64[0] = pu128Src->au64[0];
9207 pu128Dst->au64[1] = pu128Src->au64[1];
9208 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9209 }
9210 return rc;
9211}
9212
9213
9214#ifdef IEM_WITH_SETJMP
9215/**
9216 * Fetches a data dqword (double qword), generally SSE related.
9217 *
9218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9219 * @param pu128Dst Where to return the dqword.
9220 * @param iSegReg The index of the segment register to use for
9221 * this access. The base and limits are checked.
9222 * @param GCPtrMem The address of the guest memory.
9223 */
9224IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9225{
9226 /* The lazy approach for now... */
9227 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9228 pu128Dst->au64[0] = pu128Src->au64[0];
9229 pu128Dst->au64[1] = pu128Src->au64[1];
9230 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9231}
9232#endif
9233
9234
9235/**
9236 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9237 * related.
9238 *
9239 * Raises \#GP(0) if not aligned.
9240 *
9241 * @returns Strict VBox status code.
9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9243 * @param pu128Dst Where to return the dqword.
9244 * @param iSegReg The index of the segment register to use for
9245 * this access. The base and limits are checked.
9246 * @param GCPtrMem The address of the guest memory.
9247 */
9248IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9249{
9250 /* The lazy approach for now... */
9251 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9252 if ( (GCPtrMem & 15)
9253 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9254 return iemRaiseGeneralProtectionFault0(pVCpu);
9255
9256 PCRTUINT128U pu128Src;
9257 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9258 if (rc == VINF_SUCCESS)
9259 {
9260 pu128Dst->au64[0] = pu128Src->au64[0];
9261 pu128Dst->au64[1] = pu128Src->au64[1];
9262 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9263 }
9264 return rc;
9265}
9266
9267
9268#ifdef IEM_WITH_SETJMP
9269/**
9270 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9271 * related, longjmp on error.
9272 *
9273 * Raises \#GP(0) if not aligned.
9274 *
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param pu128Dst Where to return the dqword.
9277 * @param iSegReg The index of the segment register to use for
9278 * this access. The base and limits are checked.
9279 * @param GCPtrMem The address of the guest memory.
9280 */
9281DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9282{
9283 /* The lazy approach for now... */
9284 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9285 if ( (GCPtrMem & 15) == 0
9286 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9287 {
9288 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9289 pu128Dst->au64[0] = pu128Src->au64[0];
9290 pu128Dst->au64[1] = pu128Src->au64[1];
9291 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9292 return;
9293 }
9294
9295 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9296 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9297}
9298#endif
9299
9300
9301
9302/**
9303 * Fetches a descriptor register (lgdt, lidt).
9304 *
9305 * @returns Strict VBox status code.
9306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9307 * @param pcbLimit Where to return the limit.
9308 * @param pGCPtrBase Where to return the base.
9309 * @param iSegReg The index of the segment register to use for
9310 * this access. The base and limits are checked.
9311 * @param GCPtrMem The address of the guest memory.
9312 * @param enmOpSize The effective operand size.
9313 */
9314IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9315 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9316{
9317 /*
9318 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9319 * little special:
9320 * - The two reads are done separately.
9321 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9322 * - We suspect the 386 to actually commit the limit before the base in
9323 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9324 * don't try to emulate this eccentric behavior, because it's not well
9325 * enough understood and rather hard to trigger.
9326 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9327 */
9328 VBOXSTRICTRC rcStrict;
9329 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9330 {
9331 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9332 if (rcStrict == VINF_SUCCESS)
9333 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9334 }
9335 else
9336 {
9337 uint32_t uTmp = 0; /* (Visual C++ may think it's used uninitialized) */
9338 if (enmOpSize == IEMMODE_32BIT)
9339 {
9340 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9341 {
9342 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9343 if (rcStrict == VINF_SUCCESS)
9344 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9345 }
9346 else
9347 {
9348 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9349 if (rcStrict == VINF_SUCCESS)
9350 {
9351 *pcbLimit = (uint16_t)uTmp;
9352 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9353 }
9354 }
9355 if (rcStrict == VINF_SUCCESS)
9356 *pGCPtrBase = uTmp;
9357 }
9358 else
9359 {
9360 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9361 if (rcStrict == VINF_SUCCESS)
9362 {
9363 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9364 if (rcStrict == VINF_SUCCESS)
9365 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9366 }
9367 }
9368 }
9369 return rcStrict;
9370}
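
/*
 * Worked example (illustrative): a 32-bit operand size LGDT on a non-486
 * target fetches a 16-bit limit at GCPtrMem and a 32-bit base at
 * GCPtrMem + 2; with a 16-bit operand size the dword fetched at
 * GCPtrMem + 2 is masked with UINT32_C(0x00ffffff), leaving a 24-bit base.
 */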
9371
9372
9373
9374/**
9375 * Stores a data byte.
9376 *
9377 * @returns Strict VBox status code.
9378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9379 * @param iSegReg The index of the segment register to use for
9380 * this access. The base and limits are checked.
9381 * @param GCPtrMem The address of the guest memory.
9382 * @param u8Value The value to store.
9383 */
9384IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9385{
9386 /* The lazy approach for now... */
9387 uint8_t *pu8Dst;
9388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9389 if (rc == VINF_SUCCESS)
9390 {
9391 *pu8Dst = u8Value;
9392 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9393 }
9394 return rc;
9395}
9396
9397
9398#ifdef IEM_WITH_SETJMP
9399/**
9400 * Stores a data byte, longjmp on error.
9401 *
9402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9403 * @param iSegReg The index of the segment register to use for
9404 * this access. The base and limits are checked.
9405 * @param GCPtrMem The address of the guest memory.
9406 * @param u8Value The value to store.
9407 */
9408IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9409{
9410 /* The lazy approach for now... */
9411 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9412 *pu8Dst = u8Value;
9413 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9414}
9415#endif
9416
9417
9418/**
9419 * Stores a data word.
9420 *
9421 * @returns Strict VBox status code.
9422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9423 * @param iSegReg The index of the segment register to use for
9424 * this access. The base and limits are checked.
9425 * @param GCPtrMem The address of the guest memory.
9426 * @param u16Value The value to store.
9427 */
9428IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9429{
9430 /* The lazy approach for now... */
9431 uint16_t *pu16Dst;
9432 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9433 if (rc == VINF_SUCCESS)
9434 {
9435 *pu16Dst = u16Value;
9436 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9437 }
9438 return rc;
9439}
9440
9441
9442#ifdef IEM_WITH_SETJMP
9443/**
9444 * Stores a data word, longjmp on error.
9445 *
9446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9447 * @param iSegReg The index of the segment register to use for
9448 * this access. The base and limits are checked.
9449 * @param GCPtrMem The address of the guest memory.
9450 * @param u16Value The value to store.
9451 */
9452IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9453{
9454 /* The lazy approach for now... */
9455 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9456 *pu16Dst = u16Value;
9457 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9458}
9459#endif
9460
9461
9462/**
9463 * Stores a data dword.
9464 *
9465 * @returns Strict VBox status code.
9466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9467 * @param iSegReg The index of the segment register to use for
9468 * this access. The base and limits are checked.
9469 * @param GCPtrMem The address of the guest memory.
9470 * @param u32Value The value to store.
9471 */
9472IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9473{
9474 /* The lazy approach for now... */
9475 uint32_t *pu32Dst;
9476 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9477 if (rc == VINF_SUCCESS)
9478 {
9479 *pu32Dst = u32Value;
9480 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9481 }
9482 return rc;
9483}
9484
9485
9486#ifdef IEM_WITH_SETJMP
9487/**
9488 * Stores a data dword, longjmp on error.
9489 *
9491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9492 * @param iSegReg The index of the segment register to use for
9493 * this access. The base and limits are checked.
9494 * @param GCPtrMem The address of the guest memory.
9495 * @param u32Value The value to store.
9496 */
9497IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9498{
9499 /* The lazy approach for now... */
9500 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9501 *pu32Dst = u32Value;
9502 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9503}
9504#endif
9505
9506
9507/**
9508 * Stores a data qword.
9509 *
9510 * @returns Strict VBox status code.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param iSegReg The index of the segment register to use for
9513 * this access. The base and limits are checked.
9514 * @param GCPtrMem The address of the guest memory.
9515 * @param u64Value The value to store.
9516 */
9517IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9518{
9519 /* The lazy approach for now... */
9520 uint64_t *pu64Dst;
9521 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9522 if (rc == VINF_SUCCESS)
9523 {
9524 *pu64Dst = u64Value;
9525 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9526 }
9527 return rc;
9528}
9529
9530
9531#ifdef IEM_WITH_SETJMP
9532/**
9533 * Stores a data qword, longjmp on error.
9534 *
9535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9536 * @param iSegReg The index of the segment register to use for
9537 * this access. The base and limits are checked.
9538 * @param GCPtrMem The address of the guest memory.
9539 * @param u64Value The value to store.
9540 */
9541IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9542{
9543 /* The lazy approach for now... */
9544 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9545 *pu64Dst = u64Value;
9546 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9547}
9548#endif
9549
9550
9551/**
9552 * Stores a data dqword.
9553 *
9554 * @returns Strict VBox status code.
9555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9556 * @param iSegReg The index of the segment register to use for
9557 * this access. The base and limits are checked.
9558 * @param GCPtrMem The address of the guest memory.
9559 * @param u128Value The value to store.
9560 */
9561IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9562{
9563 /* The lazy approach for now... */
9564 PRTUINT128U pu128Dst;
9565 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9566 if (rc == VINF_SUCCESS)
9567 {
9568 pu128Dst->au64[0] = u128Value.au64[0];
9569 pu128Dst->au64[1] = u128Value.au64[1];
9570 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9571 }
9572 return rc;
9573}
9574
9575
9576#ifdef IEM_WITH_SETJMP
9577/**
9578 * Stores a data dqword, longjmp on error.
9579 *
9580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9581 * @param iSegReg The index of the segment register to use for
9582 * this access. The base and limits are checked.
9583 * @param GCPtrMem The address of the guest memory.
9584 * @param u128Value The value to store.
9585 */
9586IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9587{
9588 /* The lazy approach for now... */
9589 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9590 pu128Dst->au64[0] = u128Value.au64[0];
9591 pu128Dst->au64[1] = u128Value.au64[1];
9592 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9593}
9594#endif
9595
9596
9597/**
9598 * Stores a data dqword, SSE aligned.
9599 *
9600 * @returns Strict VBox status code.
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param iSegReg The index of the segment register to use for
9603 * this access. The base and limits are checked.
9604 * @param GCPtrMem The address of the guest memory.
9605 * @param u128Value The value to store.
9606 */
9607IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9608{
9609 /* The lazy approach for now... */
9610 if ( (GCPtrMem & 15)
9611 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9612 return iemRaiseGeneralProtectionFault0(pVCpu);
9613
9614 PRTUINT128U pu128Dst;
9615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9616 if (rc == VINF_SUCCESS)
9617 {
9618 pu128Dst->au64[0] = u128Value.au64[0];
9619 pu128Dst->au64[1] = u128Value.au64[1];
9620 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9621 }
9622 return rc;
9623}
9624
9625
9626#ifdef IEM_WITH_SETJMP
9627/**
9628 * Stores a data dqword, SSE aligned, longjmp on error.
9629 *
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param iSegReg The index of the segment register to use for
9633 * this access. The base and limits are checked.
9634 * @param GCPtrMem The address of the guest memory.
9635 * @param u128Value The value to store.
9636 */
9637DECL_NO_INLINE(IEM_STATIC, void)
9638iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9639{
9640 /* The lazy approach for now... */
9641 if ( (GCPtrMem & 15) == 0
9642 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9643 {
9644 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9645 pu128Dst->au64[0] = u128Value.au64[0];
9646 pu128Dst->au64[1] = u128Value.au64[1];
9647 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9648 return;
9649 }
9650
9651 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9652 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9653}
9654#endif
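
/*
 * Illustrative sketch (example only): the alignment rule the two aligned SSE
 * store functions above implement - a misaligned 16-byte access faults with
 * #GP(0) unless the MXCSR.MM (misaligned SSE mode) bit is set.
 */
#if 0 /* example only */
static bool iemExampleSseStoreFaults(RTGCPTR GCPtrMem, uint32_t fMxcsr)
{
    return (GCPtrMem & 15) != 0          /* not 16-byte aligned */
        && !(fMxcsr & X86_MXCSR_MM);     /* and misaligned accesses are not enabled */
}
#endif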
9655
9656
9657/**
9658 * Stores a descriptor register (sgdt, sidt).
9659 *
9660 * @returns Strict VBox status code.
9661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9662 * @param cbLimit The limit.
9663 * @param GCPtrBase The base address.
9664 * @param iSegReg The index of the segment register to use for
9665 * this access. The base and limits are checked.
9666 * @param GCPtrMem The address of the guest memory.
9667 */
9668IEM_STATIC VBOXSTRICTRC
9669iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9670{
9671 /*
9672 * The SIDT and SGDT instructions actually store the data using two
9673 * independent writes. The instructions do not respond to opsize prefixes.
9674 */
9675 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9676 if (rcStrict == VINF_SUCCESS)
9677 {
9678 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9679 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9680 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9681 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9682 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9683 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9684 else
9685 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9686 }
9687 return rcStrict;
9688}
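
/*
 * Illustrative sketch (example only): roughly how an SGDT implementation could
 * use the helper above.  The segment index and effective address (iEffSeg,
 * GCPtrEffDst) are assumed to have been produced by the instruction decoder
 * and are not defined here.
 */
#if 0 /* example only */
    PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
    VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt,
                                                iEffSeg, GCPtrEffDst);
#endif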
9689
9690
9691/**
9692 * Pushes a word onto the stack.
9693 *
9694 * @returns Strict VBox status code.
9695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9696 * @param u16Value The value to push.
9697 */
9698IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9699{
9700 /* Decrement the stack pointer. */
9701 uint64_t uNewRsp;
9702 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9703 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9704
9705 /* Write the word the lazy way. */
9706 uint16_t *pu16Dst;
9707 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9708 if (rc == VINF_SUCCESS)
9709 {
9710 *pu16Dst = u16Value;
9711 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9712 }
9713
9714 /* Commit the new RSP value unless an access handler made trouble. */
9715 if (rc == VINF_SUCCESS)
9716 pCtx->rsp = uNewRsp;
9717
9718 return rc;
9719}
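
/*
 * Illustrative sketch (example only): a 16-bit register PUSH could be built
 * directly on the helper above.  X86_GREG_xAX is used here purely for
 * illustration; a real implementation takes the register from the decoder.
 */
#if 0 /* example only */
    uint16_t const u16Value = iemGRegFetchU16(pVCpu, X86_GREG_xAX);
    VBOXSTRICTRC   rcStrict = iemMemStackPushU16(pVCpu, u16Value);
#endif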
9720
9721
9722/**
9723 * Pushes a dword onto the stack.
9724 *
9725 * @returns Strict VBox status code.
9726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9727 * @param u32Value The value to push.
9728 */
9729IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9730{
9731 /* Decrement the stack pointer. */
9732 uint64_t uNewRsp;
9733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9734 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9735
9736 /* Write the dword the lazy way. */
9737 uint32_t *pu32Dst;
9738 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9739 if (rc == VINF_SUCCESS)
9740 {
9741 *pu32Dst = u32Value;
9742 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9743 }
9744
9745 /* Commit the new RSP value unless an access handler made trouble. */
9746 if (rc == VINF_SUCCESS)
9747 pCtx->rsp = uNewRsp;
9748
9749 return rc;
9750}
9751
9752
9753/**
9754 * Pushes a dword segment register value onto the stack.
9755 *
9756 * @returns Strict VBox status code.
9757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9758 * @param u32Value The value to push.
9759 */
9760IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9761{
9762 /* Decrement the stack pointer. */
9763 uint64_t uNewRsp;
9764 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9765 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9766
9767 VBOXSTRICTRC rc;
9768 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9769 {
9770 /* The recompiler writes a full dword. */
9771 uint32_t *pu32Dst;
9772 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9773 if (rc == VINF_SUCCESS)
9774 {
9775 *pu32Dst = u32Value;
9776 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9777 }
9778 }
9779 else
9780 {
9781 /* The Intel docs talk about zero extending the selector register
9782 value. My actual Intel CPU here might be zero extending the value,
9783 but it still only writes the lower word... */
9784 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9785 * happens when crossing an electric page boundary: is the high word checked
9786 * for write accessibility or not? Probably it is. What about segment limits?
9787 * It appears this behavior is also shared with trap error codes.
9788 *
9789 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
9790 * ancient hardware to see when it actually changed. */
9791 uint16_t *pu16Dst;
9792 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9793 if (rc == VINF_SUCCESS)
9794 {
9795 *pu16Dst = (uint16_t)u32Value;
9796 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9797 }
9798 }
9799
9800 /* Commit the new RSP value unless an access handler made trouble. */
9801 if (rc == VINF_SUCCESS)
9802 pCtx->rsp = uNewRsp;
9803
9804 return rc;
9805}
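
/*
 * Illustrative sketch (example only): the two candidate behaviours discussed
 * in the comment above for a 32-bit operand size segment register push -
 * either the whole dword is written zero extended, or only the low word is
 * written and the high word of the stack slot is left untouched.
 */
#if 0 /* example only */
static uint32_t iemExampleSRegPushSlot(uint32_t u32OldSlot, uint16_t uSel, bool fFullDwordWrite)
{
    if (fFullDwordWrite)
        return (uint32_t)uSel;                              /* zero extended, whole dword written */
    return (u32OldSlot & UINT32_C(0xffff0000)) | uSel;      /* only the low word written */
}
#endif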
9806
9807
9808/**
9809 * Pushes a qword onto the stack.
9810 *
9811 * @returns Strict VBox status code.
9812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9813 * @param u64Value The value to push.
9814 */
9815IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9816{
9817 /* Decrement the stack pointer. */
9818 uint64_t uNewRsp;
9819 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9820 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9821
9822 /* Write the qword the lazy way. */
9823 uint64_t *pu64Dst;
9824 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9825 if (rc == VINF_SUCCESS)
9826 {
9827 *pu64Dst = u64Value;
9828 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9829 }
9830
9831 /* Commit the new RSP value unless an access handler made trouble. */
9832 if (rc == VINF_SUCCESS)
9833 pCtx->rsp = uNewRsp;
9834
9835 return rc;
9836}
9837
9838
9839/**
9840 * Pops a word from the stack.
9841 *
9842 * @returns Strict VBox status code.
9843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9844 * @param pu16Value Where to store the popped value.
9845 */
9846IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9847{
9848 /* Increment the stack pointer. */
9849 uint64_t uNewRsp;
9850 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9851 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9852
9853 /* Load the word the lazy way. */
9854 uint16_t const *pu16Src;
9855 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9856 if (rc == VINF_SUCCESS)
9857 {
9858 *pu16Value = *pu16Src;
9859 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9860
9861 /* Commit the new RSP value. */
9862 if (rc == VINF_SUCCESS)
9863 pCtx->rsp = uNewRsp;
9864 }
9865
9866 return rc;
9867}
9868
9869
9870/**
9871 * Pops a dword from the stack.
9872 *
9873 * @returns Strict VBox status code.
9874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9875 * @param pu32Value Where to store the popped value.
9876 */
9877IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9878{
9879 /* Increment the stack pointer. */
9880 uint64_t uNewRsp;
9881 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9882 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9883
9884 /* Load the dword the lazy way. */
9885 uint32_t const *pu32Src;
9886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9887 if (rc == VINF_SUCCESS)
9888 {
9889 *pu32Value = *pu32Src;
9890 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9891
9892 /* Commit the new RSP value. */
9893 if (rc == VINF_SUCCESS)
9894 pCtx->rsp = uNewRsp;
9895 }
9896
9897 return rc;
9898}
9899
9900
9901/**
9902 * Pops a qword from the stack.
9903 *
9904 * @returns Strict VBox status code.
9905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9906 * @param pu64Value Where to store the popped value.
9907 */
9908IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9909{
9910 /* Increment the stack pointer. */
9911 uint64_t uNewRsp;
9912 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9913 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9914
9915 /* Load the qword the lazy way. */
9916 uint64_t const *pu64Src;
9917 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9918 if (rc == VINF_SUCCESS)
9919 {
9920 *pu64Value = *pu64Src;
9921 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9922
9923 /* Commit the new RSP value. */
9924 if (rc == VINF_SUCCESS)
9925 pCtx->rsp = uNewRsp;
9926 }
9927
9928 return rc;
9929}
9930
9931
9932/**
9933 * Pushes a word onto the stack, using a temporary stack pointer.
9934 *
9935 * @returns Strict VBox status code.
9936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9937 * @param u16Value The value to push.
9938 * @param pTmpRsp Pointer to the temporary stack pointer.
9939 */
9940IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9941{
9942 /* Decrement the stack pointer. */
9943 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9944 RTUINT64U NewRsp = *pTmpRsp;
9945 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9946
9947 /* Write the word the lazy way. */
9948 uint16_t *pu16Dst;
9949 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9950 if (rc == VINF_SUCCESS)
9951 {
9952 *pu16Dst = u16Value;
9953 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9954 }
9955
9956 /* Commit the new RSP value unless an access handler made trouble. */
9957 if (rc == VINF_SUCCESS)
9958 *pTmpRsp = NewRsp;
9959
9960 return rc;
9961}
9962
9963
9964/**
9965 * Pushes a dword onto the stack, using a temporary stack pointer.
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param u32Value The value to push.
9970 * @param pTmpRsp Pointer to the temporary stack pointer.
9971 */
9972IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9973{
9974 /* Decrement the stack pointer. */
9975 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9976 RTUINT64U NewRsp = *pTmpRsp;
9977 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9978
9979 /* Write the dword the lazy way. */
9980 uint32_t *pu32Dst;
9981 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9982 if (rc == VINF_SUCCESS)
9983 {
9984 *pu32Dst = u32Value;
9985 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9986 }
9987
9988 /* Commit the new RSP value unless an access handler made trouble. */
9989 if (rc == VINF_SUCCESS)
9990 *pTmpRsp = NewRsp;
9991
9992 return rc;
9993}
9994
9995
9996/**
9997 * Pushes a qword onto the stack, using a temporary stack pointer.
9998 *
9999 * @returns Strict VBox status code.
10000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10001 * @param u64Value The value to push.
10002 * @param pTmpRsp Pointer to the temporary stack pointer.
10003 */
10004IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10005{
10006 /* Decrement the stack pointer. */
10007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10008 RTUINT64U NewRsp = *pTmpRsp;
10009 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10010
10011 /* Write the qword the lazy way. */
10012 uint64_t *pu64Dst;
10013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10014 if (rc == VINF_SUCCESS)
10015 {
10016 *pu64Dst = u64Value;
10017 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10018 }
10019
10020 /* Commit the new RSP value unless an access handler made trouble. */
10021 if (rc == VINF_SUCCESS)
10022 *pTmpRsp = NewRsp;
10023
10024 return rc;
10025}
10026
10027
10028/**
10029 * Pops a word from the stack, using a temporary stack pointer.
10030 *
10031 * @returns Strict VBox status code.
10032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10033 * @param pu16Value Where to store the popped value.
10034 * @param pTmpRsp Pointer to the temporary stack pointer.
10035 */
10036IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10037{
10038 /* Increment the stack pointer. */
10039 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10040 RTUINT64U NewRsp = *pTmpRsp;
10041 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10042
10043 /* Load the word the lazy way. */
10044 uint16_t const *pu16Src;
10045 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10046 if (rc == VINF_SUCCESS)
10047 {
10048 *pu16Value = *pu16Src;
10049 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10050
10051 /* Commit the new RSP value. */
10052 if (rc == VINF_SUCCESS)
10053 *pTmpRsp = NewRsp;
10054 }
10055
10056 return rc;
10057}
10058
10059
10060/**
10061 * Pops a dword from the stack, using a temporary stack pointer.
10062 *
10063 * @returns Strict VBox status code.
10064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10065 * @param pu32Value Where to store the popped value.
10066 * @param pTmpRsp Pointer to the temporary stack pointer.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10069{
10070 /* Increment the stack pointer. */
10071 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10072 RTUINT64U NewRsp = *pTmpRsp;
10073 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10074
10075 /* Load the dword the lazy way. */
10076 uint32_t const *pu32Src;
10077 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10078 if (rc == VINF_SUCCESS)
10079 {
10080 *pu32Value = *pu32Src;
10081 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10082
10083 /* Commit the new RSP value. */
10084 if (rc == VINF_SUCCESS)
10085 *pTmpRsp = NewRsp;
10086 }
10087
10088 return rc;
10089}
10090
10091
10092/**
10093 * Pops a qword from the stack, using a temporary stack pointer.
10094 *
10095 * @returns Strict VBox status code.
10096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10097 * @param pu64Value Where to store the popped value.
10098 * @param pTmpRsp Pointer to the temporary stack pointer.
10099 */
10100IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10101{
10102 /* Increment the stack pointer. */
10103 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10104 RTUINT64U NewRsp = *pTmpRsp;
10105 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10106
10107 /* Load the qword the lazy way. */
10108 uint64_t const *pu64Src;
10109 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10110 if (rcStrict == VINF_SUCCESS)
10111 {
10112 *pu64Value = *pu64Src;
10113 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10114
10115 /* Commit the new RSP value. */
10116 if (rcStrict == VINF_SUCCESS)
10117 *pTmpRsp = NewRsp;
10118 }
10119
10120 return rcStrict;
10121}
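
/*
 * Illustrative sketch (example only): how the *Ex variants above are meant to
 * be used - pop several values through a temporary RSP and only commit
 * CPUMCTX::rsp once everything has succeeded.
 */
#if 0 /* example only */
    PCPUMCTX     pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U    TmpRsp;
    TmpRsp.u = pCtx->rsp;
    uint16_t     u16First, u16Second;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;           /* commit the new stack pointer only on full success */
#endif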
10122
10123
10124/**
10125 * Begin a special stack push (used by interrupts, exceptions and such).
10126 *
10127 * This will raise \#SS or \#PF if appropriate.
10128 *
10129 * @returns Strict VBox status code.
10130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10131 * @param cbMem The number of bytes to push onto the stack.
10132 * @param ppvMem Where to return the pointer to the stack memory.
10133 * As with the other memory functions this could be
10134 * direct access or bounce buffered access, so
10135 * don't commit register until the commit call
10136 * succeeds.
10137 * @param puNewRsp Where to return the new RSP value. This must be
10138 * passed unchanged to
10139 * iemMemStackPushCommitSpecial().
10140 */
10141IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10142{
10143 Assert(cbMem < UINT8_MAX);
10144 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10145 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10146 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10147}
10148
10149
10150/**
10151 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10152 *
10153 * This will update the rSP.
10154 *
10155 * @returns Strict VBox status code.
10156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10157 * @param pvMem The pointer returned by
10158 * iemMemStackPushBeginSpecial().
10159 * @param uNewRsp The new RSP value returned by
10160 * iemMemStackPushBeginSpecial().
10161 */
10162IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10163{
10164 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10165 if (rcStrict == VINF_SUCCESS)
10166 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10167 return rcStrict;
10168}
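
/*
 * Illustrative sketch (example only): the Begin/Commit pairing above as an
 * exception-style stack push would use it.  The frame size (6 bytes) is an
 * arbitrary value chosen for illustration.
 */
#if 0 /* example only */
    void        *pvStackFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, &pvStackFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... fill in the mapped frame at pvStackFrame ... */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
    }
#endif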
10169
10170
10171/**
10172 * Begin a special stack pop (used by iret, retf and such).
10173 *
10174 * This will raise \#SS or \#PF if appropriate.
10175 *
10176 * @returns Strict VBox status code.
10177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10178 * @param cbMem The number of bytes to pop from the stack.
10179 * @param ppvMem Where to return the pointer to the stack memory.
10180 * @param puNewRsp Where to return the new RSP value. This must be
10181 * assigned to CPUMCTX::rsp manually some time
10182 * after iemMemStackPopDoneSpecial() has been
10183 * called.
10184 */
10185IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10186{
10187 Assert(cbMem < UINT8_MAX);
10188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10189 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10190 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10191}
10192
10193
10194/**
10195 * Continue a special stack pop (used by iret and retf).
10196 *
10197 * This will raise \#SS or \#PF if appropriate.
10198 *
10199 * @returns Strict VBox status code.
10200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10201 * @param cbMem The number of bytes to pop from the stack.
10202 * @param ppvMem Where to return the pointer to the stack memory.
10203 * @param puNewRsp Where to return the new RSP value. This must be
10204 * assigned to CPUMCTX::rsp manually some time
10205 * after iemMemStackPopDoneSpecial() has been
10206 * called.
10207 */
10208IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10209{
10210 Assert(cbMem < UINT8_MAX);
10211 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10212 RTUINT64U NewRsp;
10213 NewRsp.u = *puNewRsp;
10214 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10215 *puNewRsp = NewRsp.u;
10216 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10217}
10218
10219
10220/**
10221 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10222 * iemMemStackPopContinueSpecial).
10223 *
10224 * The caller will manually commit the rSP.
10225 *
10226 * @returns Strict VBox status code.
10227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10228 * @param pvMem The pointer returned by
10229 * iemMemStackPopBeginSpecial() or
10230 * iemMemStackPopContinueSpecial().
10231 */
10232IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10233{
10234 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10235}
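
/*
 * Illustrative sketch (example only): the Begin/Done pairing for a special
 * stack pop.  Note that, as documented above, the caller assigns rSP itself
 * after the Done call.  The frame size (6 bytes) is arbitrary.
 */
#if 0 /* example only */
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... read the popped values out of the mapped frame ... */
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
        if (rcStrict == VINF_SUCCESS)
            IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
    }
#endif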
10236
10237
10238/**
10239 * Fetches a system table byte.
10240 *
10241 * @returns Strict VBox status code.
10242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10243 * @param pbDst Where to return the byte.
10244 * @param iSegReg The index of the segment register to use for
10245 * this access. The base and limits are checked.
10246 * @param GCPtrMem The address of the guest memory.
10247 */
10248IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10249{
10250 /* The lazy approach for now... */
10251 uint8_t const *pbSrc;
10252 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10253 if (rc == VINF_SUCCESS)
10254 {
10255 *pbDst = *pbSrc;
10256 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10257 }
10258 return rc;
10259}
10260
10261
10262/**
10263 * Fetches a system table word.
10264 *
10265 * @returns Strict VBox status code.
10266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10267 * @param pu16Dst Where to return the word.
10268 * @param iSegReg The index of the segment register to use for
10269 * this access. The base and limits are checked.
10270 * @param GCPtrMem The address of the guest memory.
10271 */
10272IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10273{
10274 /* The lazy approach for now... */
10275 uint16_t const *pu16Src;
10276 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10277 if (rc == VINF_SUCCESS)
10278 {
10279 *pu16Dst = *pu16Src;
10280 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10281 }
10282 return rc;
10283}
10284
10285
10286/**
10287 * Fetches a system table dword.
10288 *
10289 * @returns Strict VBox status code.
10290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10291 * @param pu32Dst Where to return the dword.
10292 * @param iSegReg The index of the segment register to use for
10293 * this access. The base and limits are checked.
10294 * @param GCPtrMem The address of the guest memory.
10295 */
10296IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10297{
10298 /* The lazy approach for now... */
10299 uint32_t const *pu32Src;
10300 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10301 if (rc == VINF_SUCCESS)
10302 {
10303 *pu32Dst = *pu32Src;
10304 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10305 }
10306 return rc;
10307}
10308
10309
10310/**
10311 * Fetches a system table qword.
10312 *
10313 * @returns Strict VBox status code.
10314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10315 * @param pu64Dst Where to return the qword.
10316 * @param iSegReg The index of the segment register to use for
10317 * this access. The base and limits are checked.
10318 * @param GCPtrMem The address of the guest memory.
10319 */
10320IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10321{
10322 /* The lazy approach for now... */
10323 uint64_t const *pu64Src;
10324 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10325 if (rc == VINF_SUCCESS)
10326 {
10327 *pu64Dst = *pu64Src;
10328 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10329 }
10330 return rc;
10331}
10332
10333
10334/**
10335 * Fetches a descriptor table entry with caller specified error code.
10336 *
10337 * @returns Strict VBox status code.
10338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10339 * @param pDesc Where to return the descriptor table entry.
10340 * @param uSel The selector which table entry to fetch.
10341 * @param uXcpt The exception to raise on table lookup error.
10342 * @param uErrorCode The error code associated with the exception.
10343 */
10344IEM_STATIC VBOXSTRICTRC
10345iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10346{
10347 AssertPtr(pDesc);
10348 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10349
10350 /** @todo did the 286 require all 8 bytes to be accessible? */
10351 /*
10352 * Get the selector table base and check bounds.
10353 */
10354 RTGCPTR GCPtrBase;
10355 if (uSel & X86_SEL_LDT)
10356 {
10357 if ( !pCtx->ldtr.Attr.n.u1Present
10358 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10359 {
10360 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10361 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10362 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10363 uErrorCode, 0);
10364 }
10365
10366 Assert(pCtx->ldtr.Attr.n.u1Present);
10367 GCPtrBase = pCtx->ldtr.u64Base;
10368 }
10369 else
10370 {
10371 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10372 {
10373 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10374 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10375 uErrorCode, 0);
10376 }
10377 GCPtrBase = pCtx->gdtr.pGdt;
10378 }
10379
10380 /*
10381 * Read the legacy descriptor and maybe the long mode extensions if
10382 * required.
10383 */
10384 VBOXSTRICTRC rcStrict;
10385 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10386 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10387 else
10388 {
10389 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10390 if (rcStrict == VINF_SUCCESS)
10391 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10392 if (rcStrict == VINF_SUCCESS)
10393 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10394 if (rcStrict == VINF_SUCCESS)
10395 pDesc->Legacy.au16[3] = 0;
10396 else
10397 return rcStrict;
10398 }
10399
10400 if (rcStrict == VINF_SUCCESS)
10401 {
10402 if ( !IEM_IS_LONG_MODE(pVCpu)
10403 || pDesc->Legacy.Gen.u1DescType)
10404 pDesc->Long.au64[1] = 0;
10405 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10406 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10407 else
10408 {
10409 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10410 /** @todo is this the right exception? */
10411 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10412 }
10413 }
10414 return rcStrict;
10415}
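
/*
 * Illustrative sketch (example only): the selector to descriptor address
 * calculation performed by the fetch routine above - table base from LDTR or
 * GDTR depending on the TI bit, plus the selector index with TI/RPL masked off.
 */
#if 0 /* example only */
static RTGCPTR iemExampleSelToDescAddr(PCPUMCTX pCtx, uint16_t uSel)
{
    RTGCPTR GCPtrBase = (uSel & X86_SEL_LDT) ? pCtx->ldtr.u64Base : pCtx->gdtr.pGdt;
    return GCPtrBase + (uSel & X86_SEL_MASK);
}
#endif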
10416
10417
10418/**
10419 * Fetches a descriptor table entry.
10420 *
10421 * @returns Strict VBox status code.
10422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10423 * @param pDesc Where to return the descriptor table entry.
10424 * @param uSel The selector which table entry to fetch.
10425 * @param uXcpt The exception to raise on table lookup error.
10426 */
10427IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10428{
10429 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10430}
10431
10432
10433/**
10434 * Fakes a long mode stack selector for SS = 0.
10435 *
10436 * @param pDescSs Where to return the fake stack descriptor.
10437 * @param uDpl The DPL we want.
10438 */
10439IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10440{
10441 pDescSs->Long.au64[0] = 0;
10442 pDescSs->Long.au64[1] = 0;
10443 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10444 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10445 pDescSs->Long.Gen.u2Dpl = uDpl;
10446 pDescSs->Long.Gen.u1Present = 1;
10447 pDescSs->Long.Gen.u1Long = 1;
10448}
10449
10450
10451/**
10452 * Marks the selector descriptor as accessed (only non-system descriptors).
10453 *
10454 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10455 * will therefore skip the limit checks.
10456 *
10457 * @returns Strict VBox status code.
10458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10459 * @param uSel The selector.
10460 */
10461IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10462{
10463 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10464
10465 /*
10466 * Get the selector table base and calculate the entry address.
10467 */
10468 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10469 ? pCtx->ldtr.u64Base
10470 : pCtx->gdtr.pGdt;
10471 GCPtr += uSel & X86_SEL_MASK;
10472
10473 /*
10474 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10475 * ugly stuff to avoid this. This will make sure it's an atomic access
10476 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10477 */
10478 VBOXSTRICTRC rcStrict;
10479 uint32_t volatile *pu32;
10480 if ((GCPtr & 3) == 0)
10481 {
10482 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
10483 GCPtr += 2 + 2;
10484 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10485 if (rcStrict != VINF_SUCCESS)
10486 return rcStrict;
10487 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10488 }
10489 else
10490 {
10491 /* The misaligned GDT/LDT case, map the whole thing. */
10492 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10493 if (rcStrict != VINF_SUCCESS)
10494 return rcStrict;
10495 switch ((uintptr_t)pu32 & 3)
10496 {
10497 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10498 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10499 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10500 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10501 }
10502 }
10503
10504 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10505}
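
/*
 * Illustrative sketch (example only): where the accessed bit lives - bit 40 of
 * the 8-byte descriptor, i.e. bit 0 of byte 5, which is why the aligned path
 * above maps the dword at offset 4 and sets bit 8 of it.
 */
#if 0 /* example only */
static void iemExampleSetAccessedBit(uint8_t *pabDesc /* 8 bytes, not atomic */)
{
    pabDesc[5] |= 0x01;     /* descriptor bit 40 == X86_SEL_TYPE_ACCESSED in the type field */
}
#endif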
10506
10507/** @} */
10508
10509
10510/*
10511 * Include the C/C++ implementation of instruction.
10512 */
10513#include "IEMAllCImpl.cpp.h"
10514
10515
10516
10517/** @name "Microcode" macros.
10518 *
10519 * The idea is that we should be able to use the same code to interpret
10520 * instructions as well as to recompile them. Thus this obfuscation.
10521 *
10522 * @{
10523 */
10524#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10525#define IEM_MC_END() }
10526#define IEM_MC_PAUSE() do {} while (0)
10527#define IEM_MC_CONTINUE() do {} while (0)
10528
10529/** Internal macro. */
10530#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10531 do \
10532 { \
10533 VBOXSTRICTRC rcStrict2 = a_Expr; \
10534 if (rcStrict2 != VINF_SUCCESS) \
10535 return rcStrict2; \
10536 } while (0)
10537
10538
10539#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10540#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10541#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10542#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10543#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10544#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10545#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10546#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10547#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10548 do { \
10549 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10550 return iemRaiseDeviceNotAvailable(pVCpu); \
10551 } while (0)
10552#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10553 do { \
10554 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10555 return iemRaiseDeviceNotAvailable(pVCpu); \
10556 } while (0)
10557#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10558 do { \
10559 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10560 return iemRaiseMathFault(pVCpu); \
10561 } while (0)
10562#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10563 do { \
10564 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10565 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10566 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10567 return iemRaiseUndefinedOpcode(pVCpu); \
10568 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10569 return iemRaiseDeviceNotAvailable(pVCpu); \
10570 } while (0)
10571#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10572 do { \
10573 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10574 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10575 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10576 return iemRaiseUndefinedOpcode(pVCpu); \
10577 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10578 return iemRaiseDeviceNotAvailable(pVCpu); \
10579 } while (0)
10580#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10581 do { \
10582 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10583 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10584 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10585 return iemRaiseUndefinedOpcode(pVCpu); \
10586 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10587 return iemRaiseDeviceNotAvailable(pVCpu); \
10588 } while (0)
10589#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10590 do { \
10591 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10592 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10593 return iemRaiseUndefinedOpcode(pVCpu); \
10594 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10595 return iemRaiseDeviceNotAvailable(pVCpu); \
10596 } while (0)
10597#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10598 do { \
10599 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10600 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10601 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10602 return iemRaiseUndefinedOpcode(pVCpu); \
10603 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10604 return iemRaiseDeviceNotAvailable(pVCpu); \
10605 } while (0)
10606#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10607 do { \
10608 if (pVCpu->iem.s.uCpl != 0) \
10609 return iemRaiseGeneralProtectionFault0(pVCpu); \
10610 } while (0)
10611#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10612 do { \
10613 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10614 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10615 } while (0)
10616
10617
10618#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10619#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10620#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10621#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10622#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10623#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10624#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10625 uint32_t a_Name; \
10626 uint32_t *a_pName = &a_Name
10627#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10628 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10629
10630#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10631#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10632
10633#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10634#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10635#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10636#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10637#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10638#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10639#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10640#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10641#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10642#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10643#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10644#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10645#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10646#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10647#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10648#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10649#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10650#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10651#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10652#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10653#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10654#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10655#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10656#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10657#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10658#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10659#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10660#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10661#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10662/** @note Not for IOPL or IF testing or modification. */
10663#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10664#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10665#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10666#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10667
10668#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10669#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10670#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10671#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10672#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10673#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10674#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10675#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10676#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10677#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10678#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10679 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10680
10681#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10682#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10683/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10684 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10685#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10686#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10687/** @note Not for IOPL or IF testing or modification. */
10688#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10689
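/*
 * Illustrative sketch (example only): what a trivial register-to-register
 * instruction body looks like when written with these microcode macros.  The
 * register indices are hard-coded here purely for illustration; real bodies
 * take them from the instruction decoder.
 */
#if 0 /* example only */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif
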
10690#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10691#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10692#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10693 do { \
10694 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10695 *pu32Reg += (a_u32Value); \
10696 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10697 } while (0)
10698#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10699
10700#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10701#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10702#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10703 do { \
10704 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10705 *pu32Reg -= (a_u32Value); \
10706 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10707 } while (0)
10708#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10709#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10710
10711#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10712#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10713#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10714#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10715#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10716#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10717#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10718
10719#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10720#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10721#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10722#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10723
10724#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10725#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10726#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10727
10728#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10729#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10730#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10731
10732#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10733#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10734#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10735
10736#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10737#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10738#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10739
10740#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10741
10742#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10743
10744#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10745#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10746#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10747 do { \
10748 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10749 *pu32Reg &= (a_u32Value); \
10750 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10751 } while (0)
10752#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10753
10754#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10755#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10756#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10757 do { \
10758 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10759 *pu32Reg |= (a_u32Value); \
10760 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10761 } while (0)
10762#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10763
10764
10765/** @note Not for IOPL or IF modification. */
10766#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10767/** @note Not for IOPL or IF modification. */
10768#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10769/** @note Not for IOPL or IF modification. */
10770#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10771
10772#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10773
10774
10775#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10776 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10777#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10778 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10779#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10780 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10781#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10782 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10783#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10784 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10785#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10786 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10787#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10788 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10789
10790#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10791 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
10792 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
10793 } while (0)
10794#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10795 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10796#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10797 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10798#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
10799 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
10800#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10801 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
10802 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
10803 } while (0)
10804#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10805 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10806#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10807 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10808 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10809 } while (0)
10810#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
10811 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
10812#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10813 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10814 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10815 } while (0)
10816#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10817 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
10818#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10819 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
10820#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10821 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10822#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10823 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
10824 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
10825 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
10826 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
10827 } while (0)
10828
10829#ifndef IEM_WITH_SETJMP
10830# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10832# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10834# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10836#else
10837# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10838 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10839# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10840 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10841# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10842 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10843#endif
10844
10845#ifndef IEM_WITH_SETJMP
10846# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10847 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10848# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10849 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10850# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10852#else
10853# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10854 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10855# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10856 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10857# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10858 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10859#endif
10860
10861#ifndef IEM_WITH_SETJMP
10862# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10864# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10865 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10866# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10867 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10868#else
10869# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10870 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10871# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10872 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10873# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10874 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10875#endif
10876
10877#ifdef SOME_UNUSED_FUNCTION
10878# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10879 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10880#endif
10881
10882#ifndef IEM_WITH_SETJMP
10883# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10884 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10885# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10887# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10888 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10889# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10890 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10891#else
10892# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10893 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10894# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10895 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10896# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10897 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10898# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10899 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10900#endif
10901
10902#ifndef IEM_WITH_SETJMP
10903# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10904 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10905# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10906 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10907# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10908 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10909#else
10910# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10911 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10912# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10913 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10914# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10915 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10916#endif
10917
10918#ifndef IEM_WITH_SETJMP
10919# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10920 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10921# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10922 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10923#else
10924# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10925 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10926# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10927 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10928#endif
10929
10930
10931
10932#ifndef IEM_WITH_SETJMP
10933# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10934 do { \
10935 uint8_t u8Tmp; \
10936 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10937 (a_u16Dst) = u8Tmp; \
10938 } while (0)
10939# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10940 do { \
10941 uint8_t u8Tmp; \
10942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10943 (a_u32Dst) = u8Tmp; \
10944 } while (0)
10945# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10946 do { \
10947 uint8_t u8Tmp; \
10948 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10949 (a_u64Dst) = u8Tmp; \
10950 } while (0)
10951# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10952 do { \
10953 uint16_t u16Tmp; \
10954 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10955 (a_u32Dst) = u16Tmp; \
10956 } while (0)
10957# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10958 do { \
10959 uint16_t u16Tmp; \
10960 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10961 (a_u64Dst) = u16Tmp; \
10962 } while (0)
10963# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10964 do { \
10965 uint32_t u32Tmp; \
10966 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10967 (a_u64Dst) = u32Tmp; \
10968 } while (0)
10969#else /* IEM_WITH_SETJMP */
10970# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10971 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10972# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10973 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10974# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10975 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10976# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10977 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10978# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10979 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10980# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10981 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10982#endif /* IEM_WITH_SETJMP */
10983
10984#ifndef IEM_WITH_SETJMP
10985# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10986 do { \
10987 uint8_t u8Tmp; \
10988 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10989 (a_u16Dst) = (int8_t)u8Tmp; \
10990 } while (0)
10991# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10992 do { \
10993 uint8_t u8Tmp; \
10994 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10995 (a_u32Dst) = (int8_t)u8Tmp; \
10996 } while (0)
10997# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10998 do { \
10999 uint8_t u8Tmp; \
11000 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11001 (a_u64Dst) = (int8_t)u8Tmp; \
11002 } while (0)
11003# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11004 do { \
11005 uint16_t u16Tmp; \
11006 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11007 (a_u32Dst) = (int16_t)u16Tmp; \
11008 } while (0)
11009# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11010 do { \
11011 uint16_t u16Tmp; \
11012 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11013 (a_u64Dst) = (int16_t)u16Tmp; \
11014 } while (0)
11015# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11016 do { \
11017 uint32_t u32Tmp; \
11018 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11019 (a_u64Dst) = (int32_t)u32Tmp; \
11020 } while (0)
11021#else /* IEM_WITH_SETJMP */
11022# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11023 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11024# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11025 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11026# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11027 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11028# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11029 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11030# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11031 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11032# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11033 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11034#endif /* IEM_WITH_SETJMP */
11035
11036#ifndef IEM_WITH_SETJMP
11037# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11038 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11039# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11040 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11041# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11042 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11043# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11044 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11045#else
11046# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11047 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11048# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11049 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11050# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11051 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11052# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11053 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11054#endif
11055
11056#ifndef IEM_WITH_SETJMP
11057# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11058 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11059# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11060 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11061# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11062 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11063# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11064 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11065#else
11066# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11067 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11068# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11069 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11070# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11071 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11072# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11073 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11074#endif
11075
11076#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11077#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11078#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11079#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11080#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11081#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11082#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11083 do { \
11084 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11085 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11086 } while (0)
11087
11088#ifndef IEM_WITH_SETJMP
11089# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11090 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11091# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11092 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11093#else
11094# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11095 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11096# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11097 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11098#endif
11099
11100
11101#define IEM_MC_PUSH_U16(a_u16Value) \
11102 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11103#define IEM_MC_PUSH_U32(a_u32Value) \
11104 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11105#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11106 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11107#define IEM_MC_PUSH_U64(a_u64Value) \
11108 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11109
11110#define IEM_MC_POP_U16(a_pu16Value) \
11111 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11112#define IEM_MC_POP_U32(a_pu32Value) \
11113 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11114#define IEM_MC_POP_U64(a_pu64Value) \
11115 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11116
11117/** Maps guest memory for direct or bounce buffered access.
11118 * The purpose is to pass the mapping to an operand implementation, hence the a_iArg parameter.
11119 * @remarks May return.
11120 */
11121#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11122 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11123
11124/** Maps guest memory for direct or bounce buffered access.
11125 * The purpose is to pass the mapping to an operand implementation, hence the a_iArg parameter.
11126 * @remarks May return.
11127 */
11128#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11129 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11130
11131/** Commits the memory and unmaps the guest memory.
11132 * @remarks May return.
11133 */
11134#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11135 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11136
11137/** Commits the memory and unmaps the guest memory, unless the FPU status word
11138 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11139 * would cause FLD not to store.
11140 *
11141 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11142 * store, while \#P will not.
11143 *
11144 * @remarks May in theory return - for now.
11145 */
11146#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11147 do { \
11148 if ( !(a_u16FSW & X86_FSW_ES) \
11149 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11150 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11151 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11152 } while (0)
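/* Illustrative sketch (not from the original sources; pfnWorker, u32Src and
 * GCPtrEffDst are hypothetical names): a read-modify-write memory operand is
 * typically mapped, handed to a worker and then committed like this, assuming
 * the usual IEM_ACCESS_DATA_RW access flag:
 *
 *      uint32_t *pu32Dst;
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_2(pfnWorker, pu32Dst, u32Src);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * Both the map and the commit macro may return from the enclosing opcode
 * function on failure (see the remarks above).
 */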
11153
11154/** Calculate the effective address from R/M. */
11155#ifndef IEM_WITH_SETJMP
11156# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11157 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11158#else
11159# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11160 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11161#endif
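/* Worked example (illustrative): with a 16-bit effective address size, a
 * ModR/M byte with mod=01 and rm=110 decodes to [bp+disp8]; the helper behind
 * the macro above therefore fetches one displacement byte, sign-extends it to
 * 16 bits, adds BP, and defaults the segment to SS (see SET_SS_DEF() in
 * iemOpHlpCalcRmEffAddr below).
 */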
11162
11163#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11164#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11165#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11166#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11167#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11168#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11169#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11170
11171/**
11172 * Defers the rest of the instruction emulation to a C implementation routine
11173 * and returns, only taking the standard parameters.
11174 *
11175 * @param a_pfnCImpl The pointer to the C routine.
11176 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11177 */
11178#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11179
11180/**
11181 * Defers the rest of instruction emulation to a C implementation routine and
11182 * returns, taking one argument in addition to the standard ones.
11183 *
11184 * @param a_pfnCImpl The pointer to the C routine.
11185 * @param a0 The argument.
11186 */
11187#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11188
11189/**
11190 * Defers the rest of the instruction emulation to a C implementation routine
11191 * and returns, taking two arguments in addition to the standard ones.
11192 *
11193 * @param a_pfnCImpl The pointer to the C routine.
11194 * @param a0 The first extra argument.
11195 * @param a1 The second extra argument.
11196 */
11197#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11198
11199/**
11200 * Defers the rest of the instruction emulation to a C implementation routine
11201 * and returns, taking three arguments in addition to the standard ones.
11202 *
11203 * @param a_pfnCImpl The pointer to the C routine.
11204 * @param a0 The first extra argument.
11205 * @param a1 The second extra argument.
11206 * @param a2 The third extra argument.
11207 */
11208#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11209
11210/**
11211 * Defers the rest of the instruction emulation to a C implementation routine
11212 * and returns, taking four arguments in addition to the standard ones.
11213 *
11214 * @param a_pfnCImpl The pointer to the C routine.
11215 * @param a0 The first extra argument.
11216 * @param a1 The second extra argument.
11217 * @param a2 The third extra argument.
11218 * @param a3 The fourth extra argument.
11219 */
11220#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11221
11222/**
11223 * Defers the rest of the instruction emulation to a C implementation routine
11224 * and returns, taking five arguments in addition to the standard ones.
11225 *
11226 * @param a_pfnCImpl The pointer to the C routine.
11227 * @param a0 The first extra argument.
11228 * @param a1 The second extra argument.
11229 * @param a2 The third extra argument.
11230 * @param a3 The fourth extra argument.
11231 * @param a4 The fifth extra argument.
11232 */
11233#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
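/* Illustrative sketch (iemCImpl_SomeWorker and u8Imm are hypothetical names):
 * since IEM_MC_CALL_CIMPL_N already contains the 'return', an opcode function
 * that is done decoding simply ends with e.g.
 *
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_SomeWorker, u8Imm);
 *
 * and the C worker's status code becomes the result of the opcode function.
 */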
11234
11235/**
11236 * Defers the entire instruction emulation to a C implementation routine and
11237 * returns, only taking the standard parameters.
11238 *
11239 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11240 *
11241 * @param a_pfnCImpl The pointer to the C routine.
11242 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11243 */
11244#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11245
11246/**
11247 * Defers the entire instruction emulation to a C implementation routine and
11248 * returns, taking one argument in addition to the standard ones.
11249 *
11250 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11251 *
11252 * @param a_pfnCImpl The pointer to the C routine.
11253 * @param a0 The argument.
11254 */
11255#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11256
11257/**
11258 * Defers the entire instruction emulation to a C implementation routine and
11259 * returns, taking two arguments in addition to the standard ones.
11260 *
11261 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11262 *
11263 * @param a_pfnCImpl The pointer to the C routine.
11264 * @param a0 The first extra argument.
11265 * @param a1 The second extra argument.
11266 */
11267#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11268
11269/**
11270 * Defers the entire instruction emulation to a C implementation routine and
11271 * returns, taking three arguments in addition to the standard ones.
11272 *
11273 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11274 *
11275 * @param a_pfnCImpl The pointer to the C routine.
11276 * @param a0 The first extra argument.
11277 * @param a1 The second extra argument.
11278 * @param a2 The third extra argument.
11279 */
11280#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
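/* Illustrative sketch (iemCImpl_SomeInstruction is a hypothetical name):
 * unlike IEM_MC_CALL_CIMPL_N, these macros do not contain a 'return', so a
 * whole-instruction deferral reads
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeInstruction);
 */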
11281
11282/**
11283 * Calls a FPU assembly implementation taking one visible argument.
11284 *
11285 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11286 * @param a0 The first extra argument.
11287 */
11288#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11289 do { \
11290 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11291 } while (0)
11292
11293/**
11294 * Calls a FPU assembly implementation taking two visible arguments.
11295 *
11296 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11297 * @param a0 The first extra argument.
11298 * @param a1 The second extra argument.
11299 */
11300#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11301 do { \
11302 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11303 } while (0)
11304
11305/**
11306 * Calls a FPU assembly implementation taking three visible arguments.
11307 *
11308 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11309 * @param a0 The first extra argument.
11310 * @param a1 The second extra argument.
11311 * @param a2 The third extra argument.
11312 */
11313#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11314 do { \
11315 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11316 } while (0)
11317
11318#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11319 do { \
11320 (a_FpuData).FSW = (a_FSW); \
11321 (a_FpuData).r80Result = *(a_pr80Value); \
11322 } while (0)
11323
11324/** Pushes FPU result onto the stack. */
11325#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11326 iemFpuPushResult(pVCpu, &a_FpuData)
11327/** Pushes FPU result onto the stack and sets the FPUDP. */
11328#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11329 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11330
11331/** Replaces ST0 with the first result value and pushes the second one onto the FPU stack. */
11332#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11333 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11334
11335/** Stores FPU result in a stack register. */
11336#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11337 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11338/** Stores FPU result in a stack register and pops the stack. */
11339#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11340 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11341/** Stores FPU result in a stack register and sets the FPUDP. */
11342#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11343 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11344/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11345 * stack. */
11346#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11347 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
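/* Illustrative sketch (pfnAImpl, FpuRes, pFpuRes, pr80Value1 and pr80Value2
 * are hypothetical names; the locals and register references are set up by
 * other IEM_MC_* macros not shown here): a register form FPU arithmetic
 * instruction combines the call and store macros roughly like this:
 *
 *      IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *
 * The assembly worker fills in the result structure (FSW plus the 80-bit
 * result, cf. IEM_MC_SET_FPU_RESULT above) and iemFpuStoreResult commits it
 * to the indicated stack register.
 */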
11348
11349/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11350#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11351 iemFpuUpdateOpcodeAndIp(pVCpu)
11352/** Free a stack register (for FFREE and FFREEP). */
11353#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11354 iemFpuStackFree(pVCpu, a_iStReg)
11355/** Increment the FPU stack pointer. */
11356#define IEM_MC_FPU_STACK_INC_TOP() \
11357 iemFpuStackIncTop(pVCpu)
11358/** Decrement the FPU stack pointer. */
11359#define IEM_MC_FPU_STACK_DEC_TOP() \
11360 iemFpuStackDecTop(pVCpu)
11361
11362/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11363#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11364 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11365/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11366#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11367 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11368/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11369#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11370 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11371/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11372#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11373 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11374/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11375 * stack. */
11376#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11377 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11378/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11379#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11380 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11381
11382/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11383#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11384 iemFpuStackUnderflow(pVCpu, a_iStDst)
11385/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11386 * stack. */
11387#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11388 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11389/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11390 * FPUDS. */
11391#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11392 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11393/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11394 * FPUDS. Pops stack. */
11395#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11396 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11397/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11398 * stack twice. */
11399#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11400 iemFpuStackUnderflowThenPopPop(pVCpu)
11401/** Raises a FPU stack underflow exception for an instruction pushing a result
11402 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11403#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11404 iemFpuStackPushUnderflow(pVCpu)
11405/** Raises a FPU stack underflow exception for an instruction pushing a result
11406 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11407#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11408 iemFpuStackPushUnderflowTwo(pVCpu)
11409
11410/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11411 * FPUIP, FPUCS and FOP. */
11412#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11413 iemFpuStackPushOverflow(pVCpu)
11414/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11415 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11416#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11417 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11418/** Prepares for using the FPU state.
11419 * Ensures that we can use the host FPU in the current context (RC+R0).
11420 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11421#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11422/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11423#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11424/** Actualizes the guest FPU state so it can be accessed and modified. */
11425#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11426
11427/** Prepares for using the SSE state.
11428 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11429 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11430#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11431/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
11432#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11433/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
11434#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
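/* Illustrative ordering (sketch; u64Tmp, iXReg and GCPtrEffDst are
 * hypothetical names): the state must be prepared/actualized before the
 * corresponding registers are touched, e.g. for storing half an XMM register
 * to memory:
 *
 *      IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *      IEM_MC_FETCH_XREG_U64(u64Tmp, iXReg);
 *      IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u64Tmp);
 */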
11435
11436/**
11437 * Calls a MMX assembly implementation taking two visible arguments.
11438 *
11439 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11440 * @param a0 The first extra argument.
11441 * @param a1 The second extra argument.
11442 */
11443#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11444 do { \
11445 IEM_MC_PREPARE_FPU_USAGE(); \
11446 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11447 } while (0)
11448
11449/**
11450 * Calls a MMX assembly implementation taking three visible arguments.
11451 *
11452 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11453 * @param a0 The first extra argument.
11454 * @param a1 The second extra argument.
11455 * @param a2 The third extra argument.
11456 */
11457#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11458 do { \
11459 IEM_MC_PREPARE_FPU_USAGE(); \
11460 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11461 } while (0)
11462
11463
11464/**
11465 * Calls a SSE assembly implementation taking two visible arguments.
11466 *
11467 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11468 * @param a0 The first extra argument.
11469 * @param a1 The second extra argument.
11470 */
11471#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11472 do { \
11473 IEM_MC_PREPARE_SSE_USAGE(); \
11474 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11475 } while (0)
11476
11477/**
11478 * Calls a SSE assembly implementation taking three visible arguments.
11479 *
11480 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11481 * @param a0 The first extra argument.
11482 * @param a1 The second extra argument.
11483 * @param a2 The third extra argument.
11484 */
11485#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11486 do { \
11487 IEM_MC_PREPARE_SSE_USAGE(); \
11488 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11489 } while (0)
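/* Illustrative sketch (pfnWorker, pDst, pSrc, iXRegDst and iXRegSrc are
 * hypothetical names): IEM_MC_CALL_SSE_AIMPL_N already folds in
 * IEM_MC_PREPARE_SSE_USAGE(), so a packed register-to-register operation only
 * needs to take the XMM references first:
 *
 *      IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnWorker, pDst, pSrc);
 */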
11490
11491/** @note Not for IOPL or IF testing. */
11492#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11493/** @note Not for IOPL or IF testing. */
11494#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11495/** @note Not for IOPL or IF testing. */
11496#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11497/** @note Not for IOPL or IF testing. */
11498#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11499/** @note Not for IOPL or IF testing. */
11500#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11501 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11502 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11503/** @note Not for IOPL or IF testing. */
11504#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11505 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11506 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11507/** @note Not for IOPL or IF testing. */
11508#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11509 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11510 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11511 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11512/** @note Not for IOPL or IF testing. */
11513#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11514 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11515 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11516 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11517#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11518#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11519#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11520/** @note Not for IOPL or IF testing. */
11521#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11522 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11523 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11524/** @note Not for IOPL or IF testing. */
11525#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11526 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11527 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11528/** @note Not for IOPL or IF testing. */
11529#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11530 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11531 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11532/** @note Not for IOPL or IF testing. */
11533#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11534 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11535 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11536/** @note Not for IOPL or IF testing. */
11537#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11538 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11539 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11540/** @note Not for IOPL or IF testing. */
11541#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11542 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11543 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11544#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11545#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11546
11547#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11548 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11549#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11550 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11551#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11552 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11553#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11554 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11555#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11556 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11557#define IEM_MC_IF_FCW_IM() \
11558 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11559
11560#define IEM_MC_ELSE() } else {
11561#define IEM_MC_ENDIF() } do {} while (0)
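/* Illustrative sketch (iGRegDst and u32Mask are hypothetical names): the
 * braces live inside the IF/ELSE/ENDIF macros themselves, so a conditional
 * sequence is written without any extra braces at the use site:
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
 *          IEM_MC_OR_GREG_U32(iGRegDst, u32Mask);
 *      IEM_MC_ELSE()
 *          IEM_MC_AND_GREG_U32(iGRegDst, ~u32Mask);
 *      IEM_MC_ENDIF();
 */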
11562
11563/** @} */
11564
11565
11566/** @name Opcode Debug Helpers.
11567 * @{
11568 */
11569#ifdef VBOX_WITH_STATISTICS
11570# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11571#else
11572# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11573#endif
11574
11575#ifdef DEBUG
11576# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11577 do { \
11578 IEMOP_INC_STATS(a_Stats); \
11579 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11580 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11581 } while (0)
11582
11583# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11584 do { \
11585 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11586 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11587 (void)RT_CONCAT(OP_,a_Upper); \
11588 (void)(a_fDisHints); \
11589 (void)(a_fIemHints); \
11590 } while (0)
11591
11592# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11593 do { \
11594 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11595 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11596 (void)RT_CONCAT(OP_,a_Upper); \
11597 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11598 (void)(a_fDisHints); \
11599 (void)(a_fIemHints); \
11600 } while (0)
11601
11602# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11603 do { \
11604 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11605 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11606 (void)RT_CONCAT(OP_,a_Upper); \
11607 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11608 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11609 (void)(a_fDisHints); \
11610 (void)(a_fIemHints); \
11611 } while (0)
11612
11613# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11614 do { \
11615 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11616 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11617 (void)RT_CONCAT(OP_,a_Upper); \
11618 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11619 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11620 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11621 (void)(a_fDisHints); \
11622 (void)(a_fIemHints); \
11623 } while (0)
11624
11625# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11626 do { \
11627 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11628 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11629 (void)RT_CONCAT(OP_,a_Upper); \
11630 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11631 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11632 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11633 (void)RT_CONCAT(OP_PARM_,a_Op4); \
11634 (void)(a_fDisHints); \
11635 (void)(a_fIemHints); \
11636 } while (0)
11637
11638#else
11639# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11640
11641# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11642 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11643# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11644 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11645# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11646 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11647# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11648 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11649# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11650 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11651
11652#endif
11653
11654#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11655 IEMOP_MNEMONIC0EX(a_Lower, \
11656 #a_Lower, \
11657 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
11658#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11659 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
11660 #a_Lower " " #a_Op1, \
11661 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
11662#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11663 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
11664 #a_Lower " " #a_Op1 "," #a_Op2, \
11665 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
11666#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11667 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
11668 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
11669 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
11670#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11671 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
11672 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
11673 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
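/* Illustrative sketch (the ADD/Gv/Ev tokens are just examples of the expected
 * style and are not tied to any particular opcode function here): a two
 * operand instruction typically starts with something like
 *
 *      IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the per-mnemonic statistics counter and, in DEBUG builds, logs
 * the decoded mnemonic; the form/operand tokens are only checked at compile
 * time against the IEMOPFORM_, OP_ and OP_PARM_ definitions.
 */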
11674
11675/** @} */
11676
11677
11678/** @name Opcode Helpers.
11679 * @{
11680 */
11681
11682#ifdef IN_RING3
11683# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11684 do { \
11685 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11686 else \
11687 { \
11688 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11689 return IEMOP_RAISE_INVALID_OPCODE(); \
11690 } \
11691 } while (0)
11692#else
11693# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11694 do { \
11695 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11696 else return IEMOP_RAISE_INVALID_OPCODE(); \
11697 } while (0)
11698#endif
11699
11700/** The instruction requires a 186 or later. */
11701#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11702# define IEMOP_HLP_MIN_186() do { } while (0)
11703#else
11704# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11705#endif
11706
11707/** The instruction requires a 286 or later. */
11708#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11709# define IEMOP_HLP_MIN_286() do { } while (0)
11710#else
11711# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11712#endif
11713
11714/** The instruction requires a 386 or later. */
11715#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11716# define IEMOP_HLP_MIN_386() do { } while (0)
11717#else
11718# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11719#endif
11720
11721/** The instruction requires a 386 or later if the given expression is true. */
11722#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11723# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11724#else
11725# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11726#endif
11727
11728/** The instruction requires a 486 or later. */
11729#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11730# define IEMOP_HLP_MIN_486() do { } while (0)
11731#else
11732# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11733#endif
11734
11735/** The instruction requires a Pentium (586) or later. */
11736#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11737# define IEMOP_HLP_MIN_586() do { } while (0)
11738#else
11739# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11740#endif
11741
11742/** The instruction requires a PentiumPro (686) or later. */
11743#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11744# define IEMOP_HLP_MIN_686() do { } while (0)
11745#else
11746# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11747#endif
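/* Illustrative use (sketch): an opcode handler for an instruction introduced
 * with the 80386 simply starts with
 *
 *      IEMOP_HLP_MIN_386();
 *
 * which compiles to nothing when the build-time target CPU is already 386 or
 * later, and otherwise checks the runtime target CPU and raises an invalid
 * opcode exception if it is older.
 */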
11748
11749
11750/** The instruction raises an \#UD in real and V8086 mode. */
11751#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11752 do \
11753 { \
11754 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
11755 else return IEMOP_RAISE_INVALID_OPCODE(); \
11756 } while (0)
11757
11758#if 0
11759#ifdef VBOX_WITH_NESTED_HWVIRT
11760/** The instruction raises an \#UD when SVM is not enabled. */
11761#define IEMOP_HLP_NEEDS_SVM_ENABLED() \
11762 do \
11763 { \
11764 if (IEM_IS_SVM_ENABLED(pVCpu)) \
11765 return IEMOP_RAISE_INVALID_OPCODE(); \
11766 } while (0)
11767#endif
11768#endif
11769
11770/** The instruction is not available in 64-bit mode; throws \#UD if we're in
11771 * 64-bit mode. */
11772#define IEMOP_HLP_NO_64BIT() \
11773 do \
11774 { \
11775 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11776 return IEMOP_RAISE_INVALID_OPCODE(); \
11777 } while (0)
11778
11779/** The instruction is only available in 64-bit mode; throws \#UD if we're not
11780 * in 64-bit mode. */
11781#define IEMOP_HLP_ONLY_64BIT() \
11782 do \
11783 { \
11784 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11785 return IEMOP_RAISE_INVALID_OPCODE(); \
11786 } while (0)
11787
11788/** The instruction defaults to 64-bit operand size when in 64-bit mode. */
11789#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11790 do \
11791 { \
11792 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11793 iemRecalEffOpSize64Default(pVCpu); \
11794 } while (0)
11795
11796/** The instruction has 64-bit operand size when in 64-bit mode. */
11797#define IEMOP_HLP_64BIT_OP_SIZE() \
11798 do \
11799 { \
11800 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11801 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11802 } while (0)
11803
11804/** Only a REX prefix immediately preceding the first opcode byte takes
11805 * effect. This macro helps ensure this, as well as logging bad guest code. */
11806#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11807 do \
11808 { \
11809 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11810 { \
11811 Log5((a_szPrf ": Overriding REX prefix at %RGv! fPrefixes=%#x\n", \
11812 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11813 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11814 pVCpu->iem.s.uRexB = 0; \
11815 pVCpu->iem.s.uRexIndex = 0; \
11816 pVCpu->iem.s.uRexReg = 0; \
11817 iemRecalEffOpSize(pVCpu); \
11818 } \
11819 } while (0)
11820
11821/**
11822 * Done decoding.
11823 */
11824#define IEMOP_HLP_DONE_DECODING() \
11825 do \
11826 { \
11827 /*nothing for now, maybe later... */ \
11828 } while (0)
11829
11830/**
11831 * Done decoding, raise \#UD exception if lock prefix present.
11832 */
11833#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11834 do \
11835 { \
11836 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11837 { /* likely */ } \
11838 else \
11839 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11840 } while (0)
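/* Illustrative placement (sketch; bRm is the usual ModR/M byte local): these
 * are invoked once the last opcode byte has been fetched and before any guest
 * state is modified, e.g.
 *
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *
 * so that a stray LOCK prefix raises #UD before the instruction has had any
 * side effects.
 */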
11841
11842#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11843 do \
11844 { \
11845 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11846 { /* likely */ } \
11847 else \
11848 { \
11849 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11850 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11851 } \
11852 } while (0)
11853#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11854 do \
11855 { \
11856 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11857 { /* likely */ } \
11858 else \
11859 { \
11860 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11861 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11862 } \
11863 } while (0)
11864
11865/**
11866 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11867 * are present.
11868 */
11869#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11870 do \
11871 { \
11872 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11873 { /* likely */ } \
11874 else \
11875 return IEMOP_RAISE_INVALID_OPCODE(); \
11876 } while (0)
11877
11878
11879/**
11880 * Done decoding VEX.
11881 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
11882 * we're in real or v8086 mode.
11883 */
11884#define IEMOP_HLP_DONE_VEX_DECODING() \
11885 do \
11886 { \
11887 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
11888 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
11889 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
11890 { /* likely */ } \
11891 else \
11892 return IEMOP_RAISE_INVALID_OPCODE(); \
11893 } while (0)
11894
11895/**
11896 * Done decoding VEX, no V, no L.
11897 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
11898 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
11899 */
11900#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
11901 do \
11902 { \
11903 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
11904 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
11905 && pVCpu->iem.s.uVexLength == 0 \
11906 && pVCpu->iem.s.uVex3rdReg == 0 \
11907 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
11908 { /* likely */ } \
11909 else \
11910 return IEMOP_RAISE_INVALID_OPCODE(); \
11911 } while (0)
11912
11913/**
11914 * Calculates the effective address of a ModR/M memory operand.
11915 *
11916 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11917 *
11918 * @return Strict VBox status code.
11919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11920 * @param bRm The ModRM byte.
11921 * @param cbImm The size of any immediate following the
11922 * effective address opcode bytes. Important for
11923 * RIP relative addressing.
11924 * @param pGCPtrEff Where to return the effective address.
11925 */
11926IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11927{
11928 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11929 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11930# define SET_SS_DEF() \
11931 do \
11932 { \
11933 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11934 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11935 } while (0)
11936
11937 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11938 {
11939/** @todo Check the effective address size crap! */
11940 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11941 {
11942 uint16_t u16EffAddr;
11943
11944 /* Handle the disp16 form with no registers first. */
11945 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11946 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11947 else
11948 {
11949 /* Get the displacement. */
11950 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11951 {
11952 case 0: u16EffAddr = 0; break;
11953 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11954 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11955 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11956 }
11957
11958 /* Add the base and index registers to the disp. */
11959 switch (bRm & X86_MODRM_RM_MASK)
11960 {
11961 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11962 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11963 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11964 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11965 case 4: u16EffAddr += pCtx->si; break;
11966 case 5: u16EffAddr += pCtx->di; break;
11967 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11968 case 7: u16EffAddr += pCtx->bx; break;
11969 }
11970 }
11971
11972 *pGCPtrEff = u16EffAddr;
11973 }
11974 else
11975 {
11976 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11977 uint32_t u32EffAddr;
11978
11979 /* Handle the disp32 form with no registers first. */
11980 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11981 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11982 else
11983 {
11984 /* Get the register (or SIB) value. */
11985 switch ((bRm & X86_MODRM_RM_MASK))
11986 {
11987 case 0: u32EffAddr = pCtx->eax; break;
11988 case 1: u32EffAddr = pCtx->ecx; break;
11989 case 2: u32EffAddr = pCtx->edx; break;
11990 case 3: u32EffAddr = pCtx->ebx; break;
11991 case 4: /* SIB */
11992 {
11993 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11994
11995 /* Get the index and scale it. */
11996 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11997 {
11998 case 0: u32EffAddr = pCtx->eax; break;
11999 case 1: u32EffAddr = pCtx->ecx; break;
12000 case 2: u32EffAddr = pCtx->edx; break;
12001 case 3: u32EffAddr = pCtx->ebx; break;
12002 case 4: u32EffAddr = 0; /*none */ break;
12003 case 5: u32EffAddr = pCtx->ebp; break;
12004 case 6: u32EffAddr = pCtx->esi; break;
12005 case 7: u32EffAddr = pCtx->edi; break;
12006 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12007 }
12008 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12009
12010 /* add base */
12011 switch (bSib & X86_SIB_BASE_MASK)
12012 {
12013 case 0: u32EffAddr += pCtx->eax; break;
12014 case 1: u32EffAddr += pCtx->ecx; break;
12015 case 2: u32EffAddr += pCtx->edx; break;
12016 case 3: u32EffAddr += pCtx->ebx; break;
12017 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12018 case 5:
12019 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12020 {
12021 u32EffAddr += pCtx->ebp;
12022 SET_SS_DEF();
12023 }
12024 else
12025 {
12026 uint32_t u32Disp;
12027 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12028 u32EffAddr += u32Disp;
12029 }
12030 break;
12031 case 6: u32EffAddr += pCtx->esi; break;
12032 case 7: u32EffAddr += pCtx->edi; break;
12033 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12034 }
12035 break;
12036 }
12037 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12038 case 6: u32EffAddr = pCtx->esi; break;
12039 case 7: u32EffAddr = pCtx->edi; break;
12040 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12041 }
12042
12043 /* Get and add the displacement. */
12044 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12045 {
12046 case 0:
12047 break;
12048 case 1:
12049 {
12050 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12051 u32EffAddr += i8Disp;
12052 break;
12053 }
12054 case 2:
12055 {
12056 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12057 u32EffAddr += u32Disp;
12058 break;
12059 }
12060 default:
12061 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12062 }
12063
12064 }
12065 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12066 *pGCPtrEff = u32EffAddr;
12067 else
12068 {
12069 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12070 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12071 }
12072 }
12073 }
12074 else
12075 {
12076 uint64_t u64EffAddr;
12077
12078 /* Handle the rip+disp32 form with no registers first. */
12079 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12080 {
12081 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12082 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12083 }
12084 else
12085 {
12086 /* Get the register (or SIB) value. */
12087 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12088 {
12089 case 0: u64EffAddr = pCtx->rax; break;
12090 case 1: u64EffAddr = pCtx->rcx; break;
12091 case 2: u64EffAddr = pCtx->rdx; break;
12092 case 3: u64EffAddr = pCtx->rbx; break;
12093 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12094 case 6: u64EffAddr = pCtx->rsi; break;
12095 case 7: u64EffAddr = pCtx->rdi; break;
12096 case 8: u64EffAddr = pCtx->r8; break;
12097 case 9: u64EffAddr = pCtx->r9; break;
12098 case 10: u64EffAddr = pCtx->r10; break;
12099 case 11: u64EffAddr = pCtx->r11; break;
12100 case 13: u64EffAddr = pCtx->r13; break;
12101 case 14: u64EffAddr = pCtx->r14; break;
12102 case 15: u64EffAddr = pCtx->r15; break;
12103 /* SIB */
12104 case 4:
12105 case 12:
12106 {
12107 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12108
12109 /* Get the index and scale it. */
12110 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12111 {
12112 case 0: u64EffAddr = pCtx->rax; break;
12113 case 1: u64EffAddr = pCtx->rcx; break;
12114 case 2: u64EffAddr = pCtx->rdx; break;
12115 case 3: u64EffAddr = pCtx->rbx; break;
12116 case 4: u64EffAddr = 0; /*none */ break;
12117 case 5: u64EffAddr = pCtx->rbp; break;
12118 case 6: u64EffAddr = pCtx->rsi; break;
12119 case 7: u64EffAddr = pCtx->rdi; break;
12120 case 8: u64EffAddr = pCtx->r8; break;
12121 case 9: u64EffAddr = pCtx->r9; break;
12122 case 10: u64EffAddr = pCtx->r10; break;
12123 case 11: u64EffAddr = pCtx->r11; break;
12124 case 12: u64EffAddr = pCtx->r12; break;
12125 case 13: u64EffAddr = pCtx->r13; break;
12126 case 14: u64EffAddr = pCtx->r14; break;
12127 case 15: u64EffAddr = pCtx->r15; break;
12128 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12129 }
12130 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12131
12132 /* add base */
12133 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12134 {
12135 case 0: u64EffAddr += pCtx->rax; break;
12136 case 1: u64EffAddr += pCtx->rcx; break;
12137 case 2: u64EffAddr += pCtx->rdx; break;
12138 case 3: u64EffAddr += pCtx->rbx; break;
12139 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12140 case 6: u64EffAddr += pCtx->rsi; break;
12141 case 7: u64EffAddr += pCtx->rdi; break;
12142 case 8: u64EffAddr += pCtx->r8; break;
12143 case 9: u64EffAddr += pCtx->r9; break;
12144 case 10: u64EffAddr += pCtx->r10; break;
12145 case 11: u64EffAddr += pCtx->r11; break;
12146 case 12: u64EffAddr += pCtx->r12; break;
12147 case 14: u64EffAddr += pCtx->r14; break;
12148 case 15: u64EffAddr += pCtx->r15; break;
12149 /* complicated encodings */
12150 case 5:
12151 case 13:
12152 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12153 {
12154 if (!pVCpu->iem.s.uRexB)
12155 {
12156 u64EffAddr += pCtx->rbp;
12157 SET_SS_DEF();
12158 }
12159 else
12160 u64EffAddr += pCtx->r13;
12161 }
12162 else
12163 {
12164 uint32_t u32Disp;
12165 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12166 u64EffAddr += (int32_t)u32Disp;
12167 }
12168 break;
12169 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12170 }
12171 break;
12172 }
12173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12174 }
12175
12176 /* Get and add the displacement. */
12177 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12178 {
12179 case 0:
12180 break;
12181 case 1:
12182 {
12183 int8_t i8Disp;
12184 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12185 u64EffAddr += i8Disp;
12186 break;
12187 }
12188 case 2:
12189 {
12190 uint32_t u32Disp;
12191 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12192 u64EffAddr += (int32_t)u32Disp;
12193 break;
12194 }
12195 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12196 }
12197
12198 }
12199
12200 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12201 *pGCPtrEff = u64EffAddr;
12202 else
12203 {
12204 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12205 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12206 }
12207 }
12208
12209 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12210 return VINF_SUCCESS;
12211}
12212
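/*
 * Worked example (illustrative only) of what iemOpHlpCalcRmEffAddr computes.
 * With 32-bit addressing, ModR/M byte 0x44 decodes as mod=01, reg=000, rm=100,
 * so a SIB byte and an 8-bit displacement follow.  Given SIB 0x58 (scale=01,
 * index=011/EBX, base=000/EAX) and disp8 0x10, the helper returns
 *      *pGCPtrEff = EAX + EBX*2 + 0x10
 * and leaves the default segment at DS (SET_SS_DEF() only kicks in for the
 * BP/SP style bases).  In 64-bit mode, mod=00 with rm=101 instead yields a
 * disp32 relative to the end of the instruction, which is why cbImm -- the
 * size of any immediate still to be fetched -- has to be passed in.
 */
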
12213
12214/**
12215 * Calculates the effective address of a ModR/M memory operand.
12216 *
12217 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12218 *
12219 * @return Strict VBox status code.
12220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12221 * @param bRm The ModRM byte.
12222 * @param cbImm The size of any immediate following the
12223 * effective address opcode bytes. Important for
12224 * RIP relative addressing.
12225 * @param pGCPtrEff Where to return the effective address.
12226 * @param offRsp RSP displacement.
12227 */
12228IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12229{
12230 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12231 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12232# define SET_SS_DEF() \
12233 do \
12234 { \
12235 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12236 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12237 } while (0)
12238
12239 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12240 {
12241/** @todo Check the effective address size crap! */
12242 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12243 {
12244 uint16_t u16EffAddr;
12245
12246 /* Handle the disp16 form with no registers first. */
12247 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12248 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12249 else
12250 {
12251                /* Get the displacement. */
12252 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12253 {
12254 case 0: u16EffAddr = 0; break;
12255 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12256 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12257 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12258 }
12259
12260 /* Add the base and index registers to the disp. */
12261 switch (bRm & X86_MODRM_RM_MASK)
12262 {
12263 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12264 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12265 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12266 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12267 case 4: u16EffAddr += pCtx->si; break;
12268 case 5: u16EffAddr += pCtx->di; break;
12269 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12270 case 7: u16EffAddr += pCtx->bx; break;
12271 }
12272 }
12273
12274 *pGCPtrEff = u16EffAddr;
12275 }
12276 else
12277 {
12278 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12279 uint32_t u32EffAddr;
12280
12281 /* Handle the disp32 form with no registers first. */
12282 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12283 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12284 else
12285 {
12286 /* Get the register (or SIB) value. */
12287 switch ((bRm & X86_MODRM_RM_MASK))
12288 {
12289 case 0: u32EffAddr = pCtx->eax; break;
12290 case 1: u32EffAddr = pCtx->ecx; break;
12291 case 2: u32EffAddr = pCtx->edx; break;
12292 case 3: u32EffAddr = pCtx->ebx; break;
12293 case 4: /* SIB */
12294 {
12295 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12296
12297 /* Get the index and scale it. */
12298 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12299 {
12300 case 0: u32EffAddr = pCtx->eax; break;
12301 case 1: u32EffAddr = pCtx->ecx; break;
12302 case 2: u32EffAddr = pCtx->edx; break;
12303 case 3: u32EffAddr = pCtx->ebx; break;
12304 case 4: u32EffAddr = 0; /*none */ break;
12305 case 5: u32EffAddr = pCtx->ebp; break;
12306 case 6: u32EffAddr = pCtx->esi; break;
12307 case 7: u32EffAddr = pCtx->edi; break;
12308 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12309 }
12310 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12311
12312 /* add base */
12313 switch (bSib & X86_SIB_BASE_MASK)
12314 {
12315 case 0: u32EffAddr += pCtx->eax; break;
12316 case 1: u32EffAddr += pCtx->ecx; break;
12317 case 2: u32EffAddr += pCtx->edx; break;
12318 case 3: u32EffAddr += pCtx->ebx; break;
12319 case 4:
12320 u32EffAddr += pCtx->esp + offRsp;
12321 SET_SS_DEF();
12322 break;
12323 case 5:
12324 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12325 {
12326 u32EffAddr += pCtx->ebp;
12327 SET_SS_DEF();
12328 }
12329 else
12330 {
12331 uint32_t u32Disp;
12332 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12333 u32EffAddr += u32Disp;
12334 }
12335 break;
12336 case 6: u32EffAddr += pCtx->esi; break;
12337 case 7: u32EffAddr += pCtx->edi; break;
12338 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12339 }
12340 break;
12341 }
12342 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12343 case 6: u32EffAddr = pCtx->esi; break;
12344 case 7: u32EffAddr = pCtx->edi; break;
12345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12346 }
12347
12348 /* Get and add the displacement. */
12349 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12350 {
12351 case 0:
12352 break;
12353 case 1:
12354 {
12355 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12356 u32EffAddr += i8Disp;
12357 break;
12358 }
12359 case 2:
12360 {
12361 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12362 u32EffAddr += u32Disp;
12363 break;
12364 }
12365 default:
12366 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12367 }
12368
12369 }
12370 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12371 *pGCPtrEff = u32EffAddr;
12372 else
12373 {
12374 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12375 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12376 }
12377 }
12378 }
12379 else
12380 {
12381 uint64_t u64EffAddr;
12382
12383 /* Handle the rip+disp32 form with no registers first. */
12384 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12385 {
12386 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12387 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12388 }
12389 else
12390 {
12391 /* Get the register (or SIB) value. */
12392 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12393 {
12394 case 0: u64EffAddr = pCtx->rax; break;
12395 case 1: u64EffAddr = pCtx->rcx; break;
12396 case 2: u64EffAddr = pCtx->rdx; break;
12397 case 3: u64EffAddr = pCtx->rbx; break;
12398 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12399 case 6: u64EffAddr = pCtx->rsi; break;
12400 case 7: u64EffAddr = pCtx->rdi; break;
12401 case 8: u64EffAddr = pCtx->r8; break;
12402 case 9: u64EffAddr = pCtx->r9; break;
12403 case 10: u64EffAddr = pCtx->r10; break;
12404 case 11: u64EffAddr = pCtx->r11; break;
12405 case 13: u64EffAddr = pCtx->r13; break;
12406 case 14: u64EffAddr = pCtx->r14; break;
12407 case 15: u64EffAddr = pCtx->r15; break;
12408 /* SIB */
12409 case 4:
12410 case 12:
12411 {
12412 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12413
12414 /* Get the index and scale it. */
12415 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12416 {
12417 case 0: u64EffAddr = pCtx->rax; break;
12418 case 1: u64EffAddr = pCtx->rcx; break;
12419 case 2: u64EffAddr = pCtx->rdx; break;
12420 case 3: u64EffAddr = pCtx->rbx; break;
12421 case 4: u64EffAddr = 0; /*none */ break;
12422 case 5: u64EffAddr = pCtx->rbp; break;
12423 case 6: u64EffAddr = pCtx->rsi; break;
12424 case 7: u64EffAddr = pCtx->rdi; break;
12425 case 8: u64EffAddr = pCtx->r8; break;
12426 case 9: u64EffAddr = pCtx->r9; break;
12427 case 10: u64EffAddr = pCtx->r10; break;
12428 case 11: u64EffAddr = pCtx->r11; break;
12429 case 12: u64EffAddr = pCtx->r12; break;
12430 case 13: u64EffAddr = pCtx->r13; break;
12431 case 14: u64EffAddr = pCtx->r14; break;
12432 case 15: u64EffAddr = pCtx->r15; break;
12433 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12434 }
12435 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12436
12437 /* add base */
12438 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12439 {
12440 case 0: u64EffAddr += pCtx->rax; break;
12441 case 1: u64EffAddr += pCtx->rcx; break;
12442 case 2: u64EffAddr += pCtx->rdx; break;
12443 case 3: u64EffAddr += pCtx->rbx; break;
12444 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12445 case 6: u64EffAddr += pCtx->rsi; break;
12446 case 7: u64EffAddr += pCtx->rdi; break;
12447 case 8: u64EffAddr += pCtx->r8; break;
12448 case 9: u64EffAddr += pCtx->r9; break;
12449 case 10: u64EffAddr += pCtx->r10; break;
12450 case 11: u64EffAddr += pCtx->r11; break;
12451 case 12: u64EffAddr += pCtx->r12; break;
12452 case 14: u64EffAddr += pCtx->r14; break;
12453 case 15: u64EffAddr += pCtx->r15; break;
12454 /* complicated encodings */
12455 case 5:
12456 case 13:
12457 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12458 {
12459 if (!pVCpu->iem.s.uRexB)
12460 {
12461 u64EffAddr += pCtx->rbp;
12462 SET_SS_DEF();
12463 }
12464 else
12465 u64EffAddr += pCtx->r13;
12466 }
12467 else
12468 {
12469 uint32_t u32Disp;
12470 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12471 u64EffAddr += (int32_t)u32Disp;
12472 }
12473 break;
12474 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12475 }
12476 break;
12477 }
12478 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12479 }
12480
12481 /* Get and add the displacement. */
12482 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12483 {
12484 case 0:
12485 break;
12486 case 1:
12487 {
12488 int8_t i8Disp;
12489 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12490 u64EffAddr += i8Disp;
12491 break;
12492 }
12493 case 2:
12494 {
12495 uint32_t u32Disp;
12496 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12497 u64EffAddr += (int32_t)u32Disp;
12498 break;
12499 }
12500 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12501 }
12502
12503 }
12504
12505 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12506 *pGCPtrEff = u64EffAddr;
12507 else
12508 {
12509 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12510 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12511 }
12512 }
12513
12514 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12515 return VINF_SUCCESS;
12516}
12517
12518
12519#ifdef IEM_WITH_SETJMP
12520/**
12521 * Calculates the effective address of a ModR/M memory operand.
12522 *
12523 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12524 *
12525 * May longjmp on internal error.
12526 *
12527 * @return The effective address.
12528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12529 * @param bRm The ModRM byte.
12530 * @param cbImm The size of any immediate following the
12531 * effective address opcode bytes. Important for
12532 * RIP relative addressing.
12533 */
12534IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12535{
12536 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12537 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12538# define SET_SS_DEF() \
12539 do \
12540 { \
12541 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12542 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12543 } while (0)
12544
12545 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12546 {
12547/** @todo Check the effective address size crap! */
12548 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12549 {
12550 uint16_t u16EffAddr;
12551
12552 /* Handle the disp16 form with no registers first. */
12553 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12554 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12555 else
12556 {
12557                /* Get the displacement. */
12558 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12559 {
12560 case 0: u16EffAddr = 0; break;
12561 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12562 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12563 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12564 }
12565
12566 /* Add the base and index registers to the disp. */
12567 switch (bRm & X86_MODRM_RM_MASK)
12568 {
12569 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12570 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12571 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12572 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12573 case 4: u16EffAddr += pCtx->si; break;
12574 case 5: u16EffAddr += pCtx->di; break;
12575 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12576 case 7: u16EffAddr += pCtx->bx; break;
12577 }
12578 }
12579
12580 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12581 return u16EffAddr;
12582 }
12583
12584 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12585 uint32_t u32EffAddr;
12586
12587 /* Handle the disp32 form with no registers first. */
12588 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12589 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12590 else
12591 {
12592 /* Get the register (or SIB) value. */
12593 switch ((bRm & X86_MODRM_RM_MASK))
12594 {
12595 case 0: u32EffAddr = pCtx->eax; break;
12596 case 1: u32EffAddr = pCtx->ecx; break;
12597 case 2: u32EffAddr = pCtx->edx; break;
12598 case 3: u32EffAddr = pCtx->ebx; break;
12599 case 4: /* SIB */
12600 {
12601 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12602
12603 /* Get the index and scale it. */
12604 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12605 {
12606 case 0: u32EffAddr = pCtx->eax; break;
12607 case 1: u32EffAddr = pCtx->ecx; break;
12608 case 2: u32EffAddr = pCtx->edx; break;
12609 case 3: u32EffAddr = pCtx->ebx; break;
12610 case 4: u32EffAddr = 0; /*none */ break;
12611 case 5: u32EffAddr = pCtx->ebp; break;
12612 case 6: u32EffAddr = pCtx->esi; break;
12613 case 7: u32EffAddr = pCtx->edi; break;
12614 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12615 }
12616 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12617
12618 /* add base */
12619 switch (bSib & X86_SIB_BASE_MASK)
12620 {
12621 case 0: u32EffAddr += pCtx->eax; break;
12622 case 1: u32EffAddr += pCtx->ecx; break;
12623 case 2: u32EffAddr += pCtx->edx; break;
12624 case 3: u32EffAddr += pCtx->ebx; break;
12625 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12626 case 5:
12627 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12628 {
12629 u32EffAddr += pCtx->ebp;
12630 SET_SS_DEF();
12631 }
12632 else
12633 {
12634 uint32_t u32Disp;
12635 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12636 u32EffAddr += u32Disp;
12637 }
12638 break;
12639 case 6: u32EffAddr += pCtx->esi; break;
12640 case 7: u32EffAddr += pCtx->edi; break;
12641 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12642 }
12643 break;
12644 }
12645 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12646 case 6: u32EffAddr = pCtx->esi; break;
12647 case 7: u32EffAddr = pCtx->edi; break;
12648 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12649 }
12650
12651 /* Get and add the displacement. */
12652 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12653 {
12654 case 0:
12655 break;
12656 case 1:
12657 {
12658 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12659 u32EffAddr += i8Disp;
12660 break;
12661 }
12662 case 2:
12663 {
12664 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12665 u32EffAddr += u32Disp;
12666 break;
12667 }
12668 default:
12669 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12670 }
12671 }
12672
12673 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12674 {
12675 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12676 return u32EffAddr;
12677 }
12678 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12679 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12680 return u32EffAddr & UINT16_MAX;
12681 }
12682
12683 uint64_t u64EffAddr;
12684
12685 /* Handle the rip+disp32 form with no registers first. */
12686 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12687 {
12688 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12689 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12690 }
12691 else
12692 {
12693 /* Get the register (or SIB) value. */
12694 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12695 {
12696 case 0: u64EffAddr = pCtx->rax; break;
12697 case 1: u64EffAddr = pCtx->rcx; break;
12698 case 2: u64EffAddr = pCtx->rdx; break;
12699 case 3: u64EffAddr = pCtx->rbx; break;
12700 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12701 case 6: u64EffAddr = pCtx->rsi; break;
12702 case 7: u64EffAddr = pCtx->rdi; break;
12703 case 8: u64EffAddr = pCtx->r8; break;
12704 case 9: u64EffAddr = pCtx->r9; break;
12705 case 10: u64EffAddr = pCtx->r10; break;
12706 case 11: u64EffAddr = pCtx->r11; break;
12707 case 13: u64EffAddr = pCtx->r13; break;
12708 case 14: u64EffAddr = pCtx->r14; break;
12709 case 15: u64EffAddr = pCtx->r15; break;
12710 /* SIB */
12711 case 4:
12712 case 12:
12713 {
12714 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12715
12716 /* Get the index and scale it. */
12717 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12718 {
12719 case 0: u64EffAddr = pCtx->rax; break;
12720 case 1: u64EffAddr = pCtx->rcx; break;
12721 case 2: u64EffAddr = pCtx->rdx; break;
12722 case 3: u64EffAddr = pCtx->rbx; break;
12723 case 4: u64EffAddr = 0; /*none */ break;
12724 case 5: u64EffAddr = pCtx->rbp; break;
12725 case 6: u64EffAddr = pCtx->rsi; break;
12726 case 7: u64EffAddr = pCtx->rdi; break;
12727 case 8: u64EffAddr = pCtx->r8; break;
12728 case 9: u64EffAddr = pCtx->r9; break;
12729 case 10: u64EffAddr = pCtx->r10; break;
12730 case 11: u64EffAddr = pCtx->r11; break;
12731 case 12: u64EffAddr = pCtx->r12; break;
12732 case 13: u64EffAddr = pCtx->r13; break;
12733 case 14: u64EffAddr = pCtx->r14; break;
12734 case 15: u64EffAddr = pCtx->r15; break;
12735 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12736 }
12737 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12738
12739 /* add base */
12740 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12741 {
12742 case 0: u64EffAddr += pCtx->rax; break;
12743 case 1: u64EffAddr += pCtx->rcx; break;
12744 case 2: u64EffAddr += pCtx->rdx; break;
12745 case 3: u64EffAddr += pCtx->rbx; break;
12746 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12747 case 6: u64EffAddr += pCtx->rsi; break;
12748 case 7: u64EffAddr += pCtx->rdi; break;
12749 case 8: u64EffAddr += pCtx->r8; break;
12750 case 9: u64EffAddr += pCtx->r9; break;
12751 case 10: u64EffAddr += pCtx->r10; break;
12752 case 11: u64EffAddr += pCtx->r11; break;
12753 case 12: u64EffAddr += pCtx->r12; break;
12754 case 14: u64EffAddr += pCtx->r14; break;
12755 case 15: u64EffAddr += pCtx->r15; break;
12756 /* complicated encodings */
12757 case 5:
12758 case 13:
12759 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12760 {
12761 if (!pVCpu->iem.s.uRexB)
12762 {
12763 u64EffAddr += pCtx->rbp;
12764 SET_SS_DEF();
12765 }
12766 else
12767 u64EffAddr += pCtx->r13;
12768 }
12769 else
12770 {
12771 uint32_t u32Disp;
12772 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12773 u64EffAddr += (int32_t)u32Disp;
12774 }
12775 break;
12776 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12777 }
12778 break;
12779 }
12780 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12781 }
12782
12783 /* Get and add the displacement. */
12784 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12785 {
12786 case 0:
12787 break;
12788 case 1:
12789 {
12790 int8_t i8Disp;
12791 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12792 u64EffAddr += i8Disp;
12793 break;
12794 }
12795 case 2:
12796 {
12797 uint32_t u32Disp;
12798 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12799 u64EffAddr += (int32_t)u32Disp;
12800 break;
12801 }
12802 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12803 }
12804
12805 }
12806
12807 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12808 {
12809 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12810 return u64EffAddr;
12811 }
12812 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12813 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12814 return u64EffAddr & UINT32_MAX;
12815}
12816#endif /* IEM_WITH_SETJMP */
12817
12818
12819/** @} */
12820
12821
12822
12823/*
12824 * Include the instructions
12825 */
12826#include "IEMAllInstructions.cpp.h"
12827
12828
12829
12830
12831#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12832
12833/**
12834 * Sets up execution verification mode.
12835 */
12836IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12837{
12838
12839 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12840
12841 /*
12842 * Always note down the address of the current instruction.
12843 */
12844 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12845 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12846
12847 /*
12848 * Enable verification and/or logging.
12849 */
12850    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12851 if ( fNewNoRem
12852 && ( 0
12853#if 0 /* auto enable on first paged protected mode interrupt */
12854 || ( pOrgCtx->eflags.Bits.u1IF
12855 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12856 && TRPMHasTrap(pVCpu)
12857 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12858#endif
12859#if 0
12860                 || (   pOrgCtx->cs.Sel == 0x10
12861                     && (   pOrgCtx->rip == 0x90119e3e
12862                         || pOrgCtx->rip == 0x901d9810))
12863#endif
12864#if 0 /* Auto enable DSL - FPU stuff. */
12865                 || (   pOrgCtx->cs.Sel == 0x10
12866 && (// pOrgCtx->rip == 0xc02ec07f
12867 //|| pOrgCtx->rip == 0xc02ec082
12868 //|| pOrgCtx->rip == 0xc02ec0c9
12869 0
12870 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12871#endif
12872#if 0 /* Auto enable DSL - fstp st0 stuff. */
12873                 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12874#endif
12875#if 0
12876 || pOrgCtx->rip == 0x9022bb3a
12877#endif
12878#if 0
12879 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12880#endif
12881#if 0
12882 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12883 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12884#endif
12885#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12886 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12887 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12888 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12889#endif
12890#if 0 /* NT4SP1 - xadd early boot. */
12891 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12892#endif
12893#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12894 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12895#endif
12896#if 0 /* NT4SP1 - cmpxchg (AMD). */
12897 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12898#endif
12899#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12900 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12901#endif
12902#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12903 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12904
12905#endif
12906#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12907 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12908
12909#endif
12910#if 0 /* NT4SP1 - frstor [ecx] */
12911 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12912#endif
12913#if 0 /* xxxxxx - All long mode code. */
12914 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12915#endif
12916#if 0 /* rep movsq linux 3.7 64-bit boot. */
12917 || (pOrgCtx->rip == 0x0000000000100241)
12918#endif
12919#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12920 || (pOrgCtx->rip == 0x000000000215e240)
12921#endif
12922#if 0 /* DOS's size-overridden iret to v8086. */
12923 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12924#endif
12925 )
12926 )
12927 {
12928 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12929 RTLogFlags(NULL, "enabled");
12930 fNewNoRem = false;
12931 }
12932 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12933 {
12934 pVCpu->iem.s.fNoRem = fNewNoRem;
12935 if (!fNewNoRem)
12936 {
12937 LogAlways(("Enabling verification mode!\n"));
12938 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12939 }
12940 else
12941 LogAlways(("Disabling verification mode!\n"));
12942 }
12943
12944 /*
12945 * Switch state.
12946 */
12947 if (IEM_VERIFICATION_ENABLED(pVCpu))
12948 {
12949 static CPUMCTX s_DebugCtx; /* Ugly! */
12950
12951 s_DebugCtx = *pOrgCtx;
12952 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12953 }
12954
12955 /*
12956 * See if there is an interrupt pending in TRPM and inject it if we can.
12957 */
12958 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12959 if ( pOrgCtx->eflags.Bits.u1IF
12960 && TRPMHasTrap(pVCpu)
12961 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12962 {
12963 uint8_t u8TrapNo;
12964 TRPMEVENT enmType;
12965 RTGCUINT uErrCode;
12966 RTGCPTR uCr2;
12967 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12968 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12969 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12970 TRPMResetTrap(pVCpu);
12971 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12972 }
12973
12974 /*
12975 * Reset the counters.
12976 */
12977 pVCpu->iem.s.cIOReads = 0;
12978 pVCpu->iem.s.cIOWrites = 0;
12979 pVCpu->iem.s.fIgnoreRaxRdx = false;
12980 pVCpu->iem.s.fOverlappingMovs = false;
12981 pVCpu->iem.s.fProblematicMemory = false;
12982 pVCpu->iem.s.fUndefinedEFlags = 0;
12983
12984 if (IEM_VERIFICATION_ENABLED(pVCpu))
12985 {
12986 /*
12987 * Free all verification records.
12988 */
12989 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12990 pVCpu->iem.s.pIemEvtRecHead = NULL;
12991 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12992 do
12993 {
12994 while (pEvtRec)
12995 {
12996 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12997 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12998 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12999 pEvtRec = pNext;
13000 }
13001 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13002 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13003 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13004 } while (pEvtRec);
13005 }
13006}
13007
13008
13009/**
13010 * Allocates an event record.
13011 * @returns Pointer to a record.
13012 */
13013IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13014{
13015 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13016 return NULL;
13017
13018 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13019 if (pEvtRec)
13020 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13021 else
13022 {
13023 if (!pVCpu->iem.s.ppIemEvtRecNext)
13024 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13025
13026 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13027 if (!pEvtRec)
13028 return NULL;
13029 }
13030 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13031 pEvtRec->pNext = NULL;
13032 return pEvtRec;
13033}
13034
13035
13036/**
13037 * IOMMMIORead notification.
13038 */
13039VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13040{
13041 PVMCPU pVCpu = VMMGetCpu(pVM);
13042 if (!pVCpu)
13043 return;
13044 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13045 if (!pEvtRec)
13046 return;
13047 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13048 pEvtRec->u.RamRead.GCPhys = GCPhys;
13049 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13050 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13051 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13052}
13053
13054
13055/**
13056 * IOMMMIOWrite notification.
13057 */
13058VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13059{
13060 PVMCPU pVCpu = VMMGetCpu(pVM);
13061 if (!pVCpu)
13062 return;
13063 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13064 if (!pEvtRec)
13065 return;
13066 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13067 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13068 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13069 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13070 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13071 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13072 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13073 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13074 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13075}
13076
13077
13078/**
13079 * IOMIOPortRead notification.
13080 */
13081VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13082{
13083 PVMCPU pVCpu = VMMGetCpu(pVM);
13084 if (!pVCpu)
13085 return;
13086 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13087 if (!pEvtRec)
13088 return;
13089 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13090 pEvtRec->u.IOPortRead.Port = Port;
13091 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13092 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13093 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13094}
13095
13096/**
13097 * IOMIOPortWrite notification.
13098 */
13099VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13100{
13101 PVMCPU pVCpu = VMMGetCpu(pVM);
13102 if (!pVCpu)
13103 return;
13104 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13105 if (!pEvtRec)
13106 return;
13107 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13108 pEvtRec->u.IOPortWrite.Port = Port;
13109 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13110 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13111 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13112 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13113}
13114
13115
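/**
 * I/O port string read notification.
 */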
13116VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13117{
13118 PVMCPU pVCpu = VMMGetCpu(pVM);
13119 if (!pVCpu)
13120 return;
13121 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13122 if (!pEvtRec)
13123 return;
13124 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13125 pEvtRec->u.IOPortStrRead.Port = Port;
13126 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13127 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13128 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13129 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13130}
13131
13132
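/**
 * I/O port string write notification.
 */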
13133VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13134{
13135 PVMCPU pVCpu = VMMGetCpu(pVM);
13136 if (!pVCpu)
13137 return;
13138 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13139 if (!pEvtRec)
13140 return;
13141 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13142 pEvtRec->u.IOPortStrWrite.Port = Port;
13143 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13144 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13145 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13146 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13147}
13148
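/*
 * Illustrative only: the notification hooks above are meant to be invoked from
 * the corresponding I/O port and MMIO paths so the access can be cross-checked
 * against IEM's own records later.  A hypothetical call site recording a single
 * byte written to port 0x80 would look like the sketch below.
 */
#if 0 /* not built */
    IEMNotifyIOPortWrite(pVM, 0x80 /* Port */, 0xfe /* u32Value */, 1 /* cbValue */);
#endif
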
13149
13150/**
13151 * Fakes and records an I/O port read.
13152 *
13153 * @returns VINF_SUCCESS.
13154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13155 * @param Port The I/O port.
13156 * @param pu32Value Where to store the fake value.
13157 * @param cbValue The size of the access.
13158 */
13159IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13160{
13161 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13162 if (pEvtRec)
13163 {
13164 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13165 pEvtRec->u.IOPortRead.Port = Port;
13166 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13167 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13168 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13169 }
13170 pVCpu->iem.s.cIOReads++;
13171 *pu32Value = 0xcccccccc;
13172 return VINF_SUCCESS;
13173}
13174
13175
13176/**
13177 * Fakes and records an I/O port write.
13178 *
13179 * @returns VINF_SUCCESS.
13180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13181 * @param Port The I/O port.
13182 * @param u32Value The value being written.
13183 * @param cbValue The size of the access.
13184 */
13185IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13186{
13187 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13188 if (pEvtRec)
13189 {
13190 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13191 pEvtRec->u.IOPortWrite.Port = Port;
13192 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13193 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13194 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13195 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13196 }
13197 pVCpu->iem.s.cIOWrites++;
13198 return VINF_SUCCESS;
13199}
13200
13201
13202/**
13203 * Used to add extra details about a stub case.
13204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13205 */
13206IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13207{
13208 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13209 PVM pVM = pVCpu->CTX_SUFF(pVM);
13210
13211 char szRegs[4096];
13212 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13213 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13214 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13215 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13216 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13217 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13218 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13219 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13220 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13221 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13222 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13223 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13224 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13225 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13226 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13227 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13228 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13229 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13230 " efer=%016VR{efer}\n"
13231 " pat=%016VR{pat}\n"
13232 " sf_mask=%016VR{sf_mask}\n"
13233 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13234 " lstar=%016VR{lstar}\n"
13235 " star=%016VR{star} cstar=%016VR{cstar}\n"
13236 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13237 );
13238
13239 char szInstr1[256];
13240 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13241 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13242 szInstr1, sizeof(szInstr1), NULL);
13243 char szInstr2[256];
13244 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13245 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13246 szInstr2, sizeof(szInstr2), NULL);
13247
13248 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13249}
13250
13251
13252/**
13253 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13254 * dump to the assertion info.
13255 *
13256 * @param pEvtRec The record to dump.
13257 */
13258IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13259{
13260 switch (pEvtRec->enmEvent)
13261 {
13262 case IEMVERIFYEVENT_IOPORT_READ:
13263 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13264                            pEvtRec->u.IOPortRead.Port,
13265                            pEvtRec->u.IOPortRead.cbValue);
13266 break;
13267 case IEMVERIFYEVENT_IOPORT_WRITE:
13268 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13269 pEvtRec->u.IOPortWrite.Port,
13270 pEvtRec->u.IOPortWrite.cbValue,
13271 pEvtRec->u.IOPortWrite.u32Value);
13272 break;
13273 case IEMVERIFYEVENT_IOPORT_STR_READ:
13274 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13275                            pEvtRec->u.IOPortStrRead.Port,
13276                            pEvtRec->u.IOPortStrRead.cbValue,
13277                            pEvtRec->u.IOPortStrRead.cTransfers);
13278 break;
13279 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13280 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13281 pEvtRec->u.IOPortStrWrite.Port,
13282 pEvtRec->u.IOPortStrWrite.cbValue,
13283 pEvtRec->u.IOPortStrWrite.cTransfers);
13284 break;
13285 case IEMVERIFYEVENT_RAM_READ:
13286 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13287 pEvtRec->u.RamRead.GCPhys,
13288 pEvtRec->u.RamRead.cb);
13289 break;
13290 case IEMVERIFYEVENT_RAM_WRITE:
13291 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13292 pEvtRec->u.RamWrite.GCPhys,
13293 pEvtRec->u.RamWrite.cb,
13294 (int)pEvtRec->u.RamWrite.cb,
13295 pEvtRec->u.RamWrite.ab);
13296 break;
13297 default:
13298 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13299 break;
13300 }
13301}
13302
13303
13304/**
13305 * Raises an assertion on the specified records, showing the given message with
13306 * a dump of both records attached.
13307 *
13308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13309 * @param pEvtRec1 The first record.
13310 * @param pEvtRec2 The second record.
13311 * @param pszMsg The message explaining why we're asserting.
13312 */
13313IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13314{
13315 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13316 iemVerifyAssertAddRecordDump(pEvtRec1);
13317 iemVerifyAssertAddRecordDump(pEvtRec2);
13318 iemVerifyAssertMsg2(pVCpu);
13319 RTAssertPanic();
13320}
13321
13322
13323/**
13324 * Raises an assertion on the specified record, showing the given message with
13325 * a record dump attached.
13326 *
13327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13328 * @param pEvtRec1 The first record.
13329 * @param   pEvtRec             The record.
13330 */
13331IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13332{
13333 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13334 iemVerifyAssertAddRecordDump(pEvtRec);
13335 iemVerifyAssertMsg2(pVCpu);
13336 RTAssertPanic();
13337}
13338
13339
13340/**
13341 * Verifies a write record.
13342 *
13343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13344 * @param pEvtRec The write record.
13345 * @param   fRem                Set if REM was doing the other execution.  If clear,
13346 * it was HM.
13347 */
13348IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13349{
13350 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13351 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13352 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13353 if ( RT_FAILURE(rc)
13354 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13355 {
13356 /* fend off ins */
13357 if ( !pVCpu->iem.s.cIOReads
13358 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13359 || ( pEvtRec->u.RamWrite.cb != 1
13360 && pEvtRec->u.RamWrite.cb != 2
13361 && pEvtRec->u.RamWrite.cb != 4) )
13362 {
13363 /* fend off ROMs and MMIO */
13364 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13365 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13366 {
13367 /* fend off fxsave */
13368 if (pEvtRec->u.RamWrite.cb != 512)
13369 {
13370 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13371 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13372 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13373 RTAssertMsg2Add("%s: %.*Rhxs\n"
13374 "iem: %.*Rhxs\n",
13375 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13376 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13377 iemVerifyAssertAddRecordDump(pEvtRec);
13378 iemVerifyAssertMsg2(pVCpu);
13379 RTAssertPanic();
13380 }
13381 }
13382 }
13383 }
13384
13385}
13386
13387/**
13388 * Performs the post-execution verification checks.
13389 */
13390IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13391{
13392 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13393 return rcStrictIem;
13394
13395 /*
13396 * Switch back the state.
13397 */
13398 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13399 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13400 Assert(pOrgCtx != pDebugCtx);
13401 IEM_GET_CTX(pVCpu) = pOrgCtx;
13402
13403 /*
13404 * Execute the instruction in REM.
13405 */
13406 bool fRem = false;
13407 PVM pVM = pVCpu->CTX_SUFF(pVM);
13408
13409 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13410#ifdef IEM_VERIFICATION_MODE_FULL_HM
13411 if ( HMIsEnabled(pVM)
13412 && pVCpu->iem.s.cIOReads == 0
13413 && pVCpu->iem.s.cIOWrites == 0
13414 && !pVCpu->iem.s.fProblematicMemory)
13415 {
13416 uint64_t uStartRip = pOrgCtx->rip;
13417 unsigned iLoops = 0;
13418 do
13419 {
13420 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13421 iLoops++;
13422 } while ( rc == VINF_SUCCESS
13423 || ( rc == VINF_EM_DBG_STEPPED
13424 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13425 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13426 || ( pOrgCtx->rip != pDebugCtx->rip
13427 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13428 && iLoops < 8) );
13429 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13430 rc = VINF_SUCCESS;
13431 }
13432#endif
13433 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13434 || rc == VINF_IOM_R3_IOPORT_READ
13435 || rc == VINF_IOM_R3_IOPORT_WRITE
13436 || rc == VINF_IOM_R3_MMIO_READ
13437 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13438 || rc == VINF_IOM_R3_MMIO_WRITE
13439 || rc == VINF_CPUM_R3_MSR_READ
13440 || rc == VINF_CPUM_R3_MSR_WRITE
13441 || rc == VINF_EM_RESCHEDULE
13442 )
13443 {
13444 EMRemLock(pVM);
13445 rc = REMR3EmulateInstruction(pVM, pVCpu);
13446 AssertRC(rc);
13447 EMRemUnlock(pVM);
13448 fRem = true;
13449 }
13450
13451# if 1 /* Skip unimplemented instructions for now. */
13452 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13453 {
13454 IEM_GET_CTX(pVCpu) = pOrgCtx;
13455 if (rc == VINF_EM_DBG_STEPPED)
13456 return VINF_SUCCESS;
13457 return rc;
13458 }
13459# endif
13460
13461 /*
13462 * Compare the register states.
13463 */
13464 unsigned cDiffs = 0;
13465 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13466 {
13467 //Log(("REM and IEM ends up with different registers!\n"));
13468 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13469
13470# define CHECK_FIELD(a_Field) \
13471 do \
13472 { \
13473 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13474 { \
13475 switch (sizeof(pOrgCtx->a_Field)) \
13476 { \
13477 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13478 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13479 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13480 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13481 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13482 } \
13483 cDiffs++; \
13484 } \
13485 } while (0)
13486# define CHECK_XSTATE_FIELD(a_Field) \
13487 do \
13488 { \
13489 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13490 { \
13491 switch (sizeof(pOrgXState->a_Field)) \
13492 { \
13493 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13494 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13495 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13496 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13497 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13498 } \
13499 cDiffs++; \
13500 } \
13501 } while (0)
13502
13503# define CHECK_BIT_FIELD(a_Field) \
13504 do \
13505 { \
13506 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13507 { \
13508 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13509 cDiffs++; \
13510 } \
13511 } while (0)
13512
13513# define CHECK_SEL(a_Sel) \
13514 do \
13515 { \
13516 CHECK_FIELD(a_Sel.Sel); \
13517 CHECK_FIELD(a_Sel.Attr.u); \
13518 CHECK_FIELD(a_Sel.u64Base); \
13519 CHECK_FIELD(a_Sel.u32Limit); \
13520 CHECK_FIELD(a_Sel.fFlags); \
13521 } while (0)
13522
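        /* Note: at this point pOrgCtx holds the state produced by the other
           engine (REM, VT-x or AMD-V -- see pszWho), while pDebugCtx is the copy
           IEM executed on; each mismatch below bumps cDiffs and is reported as
           "iem=... <who>=...". */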
13523 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13524 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13525
13526#if 1 /* The recompiler doesn't update these the intel way. */
13527 if (fRem)
13528 {
13529 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13530 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13531 pOrgXState->x87.CS = pDebugXState->x87.CS;
13532 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13533 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13534 pOrgXState->x87.DS = pDebugXState->x87.DS;
13535 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13536 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13537 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13538 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13539 }
13540#endif
13541 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13542 {
13543 RTAssertMsg2Weak(" the FPU state differs\n");
13544 cDiffs++;
13545 CHECK_XSTATE_FIELD(x87.FCW);
13546 CHECK_XSTATE_FIELD(x87.FSW);
13547 CHECK_XSTATE_FIELD(x87.FTW);
13548 CHECK_XSTATE_FIELD(x87.FOP);
13549 CHECK_XSTATE_FIELD(x87.FPUIP);
13550 CHECK_XSTATE_FIELD(x87.CS);
13551 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13552 CHECK_XSTATE_FIELD(x87.FPUDP);
13553 CHECK_XSTATE_FIELD(x87.DS);
13554 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13555 CHECK_XSTATE_FIELD(x87.MXCSR);
13556 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13557 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13558 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13559 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13560 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13561 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13562 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13563 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13564 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13565 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13566 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13567 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13568 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13569 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13570 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13571 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13572 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13573 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13574 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13575 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13576 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13577 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13578 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13579 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13580 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13581 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13582 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13583 }
13584 CHECK_FIELD(rip);
13585 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13586 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13587 {
13588 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13589 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13590 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13591 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13592 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13593 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13594 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13595 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13596 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13597 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13598 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13599 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13600 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13601 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13602 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13603 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13604 if (0 && !fRem) /** @todo debug the occasional clearing of RF when running against VT-x. */
13605 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13606 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13607 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13608 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13609 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13610 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13611 }
13612
13613 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13614 CHECK_FIELD(rax);
13615 CHECK_FIELD(rcx);
13616 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13617 CHECK_FIELD(rdx);
13618 CHECK_FIELD(rbx);
13619 CHECK_FIELD(rsp);
13620 CHECK_FIELD(rbp);
13621 CHECK_FIELD(rsi);
13622 CHECK_FIELD(rdi);
13623 CHECK_FIELD(r8);
13624 CHECK_FIELD(r9);
13625 CHECK_FIELD(r10);
13626 CHECK_FIELD(r11);
13627 CHECK_FIELD(r12);
13628 CHECK_FIELD(r13);
13629 CHECK_SEL(cs);
13630 CHECK_SEL(ss);
13631 CHECK_SEL(ds);
13632 CHECK_SEL(es);
13633 CHECK_SEL(fs);
13634 CHECK_SEL(gs);
13635 CHECK_FIELD(cr0);
13636
13637 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13638 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13639 /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access,
13640 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13641 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13642 {
13643 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13644 { /* ignore */ }
13645 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13646 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13647 && fRem)
13648 { /* ignore */ }
13649 else
13650 CHECK_FIELD(cr2);
13651 }
13652 CHECK_FIELD(cr3);
13653 CHECK_FIELD(cr4);
13654 CHECK_FIELD(dr[0]);
13655 CHECK_FIELD(dr[1]);
13656 CHECK_FIELD(dr[2]);
13657 CHECK_FIELD(dr[3]);
13658 CHECK_FIELD(dr[6]);
13659 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13660 CHECK_FIELD(dr[7]);
13661 CHECK_FIELD(gdtr.cbGdt);
13662 CHECK_FIELD(gdtr.pGdt);
13663 CHECK_FIELD(idtr.cbIdt);
13664 CHECK_FIELD(idtr.pIdt);
13665 CHECK_SEL(ldtr);
13666 CHECK_SEL(tr);
13667 CHECK_FIELD(SysEnter.cs);
13668 CHECK_FIELD(SysEnter.eip);
13669 CHECK_FIELD(SysEnter.esp);
13670 CHECK_FIELD(msrEFER);
13671 CHECK_FIELD(msrSTAR);
13672 CHECK_FIELD(msrPAT);
13673 CHECK_FIELD(msrLSTAR);
13674 CHECK_FIELD(msrCSTAR);
13675 CHECK_FIELD(msrSFMASK);
13676 CHECK_FIELD(msrKERNELGSBASE);
13677
13678 if (cDiffs != 0)
13679 {
13680 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13681 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13682 RTAssertPanic();
13683 static bool volatile s_fEnterDebugger = true;
13684 if (s_fEnterDebugger)
13685 DBGFSTOP(pVM);
13686
13687# if 1 /* Ignore unimplemented instructions for now. */
13688 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13689 rcStrictIem = VINF_SUCCESS;
13690# endif
13691 }
13692# undef CHECK_FIELD
13693# undef CHECK_BIT_FIELD
13694 }
13695
13696 /*
13697 * If the register state compared fine, check the verification event
13698 * records.
13699 */
13700 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13701 {
13702 /*
13703 * Compare verification event records.
13704 * - I/O port accesses should be a 1:1 match.
13705 */
13706 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13707 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13708 while (pIemRec && pOtherRec)
13709 {
13710 /* Since we might miss RAM writes and reads in the other record set, skip IEM RAM
13711 records without a counterpart: ignore reads and verify extra writes against guest memory. */
13712 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13713 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13714 && pIemRec->pNext)
13715 {
13716 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13717 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13718 pIemRec = pIemRec->pNext;
13719 }
13720
13721 /* Do the compare. */
13722 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13723 {
13724 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13725 break;
13726 }
13727 bool fEquals;
13728 switch (pIemRec->enmEvent)
13729 {
13730 case IEMVERIFYEVENT_IOPORT_READ:
13731 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13732 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13733 break;
13734 case IEMVERIFYEVENT_IOPORT_WRITE:
13735 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13736 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13737 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13738 break;
13739 case IEMVERIFYEVENT_IOPORT_STR_READ:
13740 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13741 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13742 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13743 break;
13744 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13745 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13746 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13747 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13748 break;
13749 case IEMVERIFYEVENT_RAM_READ:
13750 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13751 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13752 break;
13753 case IEMVERIFYEVENT_RAM_WRITE:
13754 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13755 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13756 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13757 break;
13758 default:
13759 fEquals = false;
13760 break;
13761 }
13762 if (!fEquals)
13763 {
13764 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13765 break;
13766 }
13767
13768 /* advance */
13769 pIemRec = pIemRec->pNext;
13770 pOtherRec = pOtherRec->pNext;
13771 }
13772
13773 /* Flush remaining IEM RAM records: verify extra writes, ignore extra reads. */
13774 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13775 {
13776 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13777 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13778 pIemRec = pIemRec->pNext;
13779 }
13780 if (pIemRec != NULL)
13781 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13782 else if (pOtherRec != NULL)
13783 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13784 }
13785 IEM_GET_CTX(pVCpu) = pOrgCtx;
13786
13787 return rcStrictIem;
13788}
13789
13790#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13791
13792/* stubs */
13793IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13794{
13795 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13796 return VERR_INTERNAL_ERROR;
13797}
13798
13799IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13800{
13801 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13802 return VERR_INTERNAL_ERROR;
13803}
13804
13805#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13806
13807
13808#ifdef LOG_ENABLED
13809/**
13810 * Logs the current instruction.
13811 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13812 * @param pCtx The current CPU context.
13813 * @param fSameCtx Set if we have the same context information as the VMM,
13814 * clear if we may have already executed an instruction in
13815 * our debug context. When clear, we assume IEMCPU holds
13816 * valid CPU mode info.
13817 */
13818IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13819{
13820# ifdef IN_RING3
13821 if (LogIs2Enabled())
13822 {
13823 char szInstr[256];
13824 uint32_t cbInstr = 0;
13825 if (fSameCtx)
13826 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13827 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13828 szInstr, sizeof(szInstr), &cbInstr);
13829 else
13830 {
13831 uint32_t fFlags = 0;
13832 switch (pVCpu->iem.s.enmCpuMode)
13833 {
13834 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13835 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13836 case IEMMODE_16BIT:
13837 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13838 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13839 else
13840 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13841 break;
13842 }
13843 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13844 szInstr, sizeof(szInstr), &cbInstr);
13845 }
13846
13847 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13848 Log2(("****\n"
13849 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13850 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13851 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13852 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13853 " %s\n"
13854 ,
13855 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13856 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13857 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13858 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13859 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13860 szInstr));
13861
13862 if (LogIs3Enabled())
13863 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13864 }
13865 else
13866# endif
13867 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13868 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13869 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13870}
13871#endif
13872
13873
13874/**
13875 * Makes status code adjustments (pass-up from I/O and access handlers)
13876 * as well as maintaining statistics.
13877 *
13878 * @returns Strict VBox status code to pass up.
13879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13880 * @param rcStrict The status from executing an instruction.
13881 */
13882DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13883{
13884 if (rcStrict != VINF_SUCCESS)
13885 {
13886 if (RT_SUCCESS(rcStrict))
13887 {
13888 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13889 || rcStrict == VINF_IOM_R3_IOPORT_READ
13890 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13891 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13892 || rcStrict == VINF_IOM_R3_MMIO_READ
13893 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13894 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13895 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13896 || rcStrict == VINF_CPUM_R3_MSR_READ
13897 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13898 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13899 || rcStrict == VINF_EM_RAW_TO_R3
13900 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13901 /* raw-mode / virt handlers only: */
13902 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13903 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13904 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13905 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13906 || rcStrict == VINF_SELM_SYNC_GDT
13907 || rcStrict == VINF_CSAM_PENDING_ACTION
13908 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13909 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13910/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13911 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13912 if (rcPassUp == VINF_SUCCESS)
13913 pVCpu->iem.s.cRetInfStatuses++;
13914 else if ( rcPassUp < VINF_EM_FIRST
13915 || rcPassUp > VINF_EM_LAST
13916 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13917 {
13918 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13919 pVCpu->iem.s.cRetPassUpStatus++;
13920 rcStrict = rcPassUp;
13921 }
13922 else
13923 {
13924 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13925 pVCpu->iem.s.cRetInfStatuses++;
13926 }
13927 }
13928 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13929 pVCpu->iem.s.cRetAspectNotImplemented++;
13930 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13931 pVCpu->iem.s.cRetInstrNotImplemented++;
13932#ifdef IEM_VERIFICATION_MODE_FULL
13933 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13934 rcStrict = VINF_SUCCESS;
13935#endif
13936 else
13937 pVCpu->iem.s.cRetErrStatuses++;
13938 }
13939 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13940 {
13941 pVCpu->iem.s.cRetPassUpStatus++;
13942 rcStrict = pVCpu->iem.s.rcPassUp;
13943 }
13944
13945 return rcStrict;
13946}
13947
13948
13949/**
13950 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13951 * IEMExecOneWithPrefetchedByPC.
13952 *
13953 * Similar code is found in IEMExecLots.
13954 *
13955 * @return Strict VBox status code.
13956 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13958 * @param fExecuteInhibit If set, execute the instruction following CLI,
13959 * POP SS and MOV SS,GR.
13960 */
13961DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13962{
13963#ifdef IEM_WITH_SETJMP
13964 VBOXSTRICTRC rcStrict;
13965 jmp_buf JmpBuf;
13966 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13967 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13968 if ((rcStrict = setjmp(JmpBuf)) == 0)
13969 {
13970 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13971 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13972 }
13973 else
13974 pVCpu->iem.s.cLongJumps++;
13975 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13976#else
13977 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13978 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13979#endif
13980 if (rcStrict == VINF_SUCCESS)
13981 pVCpu->iem.s.cInstructions++;
13982 if (pVCpu->iem.s.cActiveMappings > 0)
13983 {
13984 Assert(rcStrict != VINF_SUCCESS);
13985 iemMemRollback(pVCpu);
13986 }
13987//#ifdef DEBUG
13988// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13989//#endif
13990
13991 /* Execute the next instruction as well if a cli, pop ss or
13992 mov ss, Gr has just completed successfully. */
13993 if ( fExecuteInhibit
13994 && rcStrict == VINF_SUCCESS
13995 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13996 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13997 {
13998 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13999 if (rcStrict == VINF_SUCCESS)
14000 {
14001#ifdef LOG_ENABLED
14002 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14003#endif
14004#ifdef IEM_WITH_SETJMP
14005 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14006 if ((rcStrict = setjmp(JmpBuf)) == 0)
14007 {
14008 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14009 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14010 }
14011 else
14012 pVCpu->iem.s.cLongJumps++;
14013 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14014#else
14015 IEM_OPCODE_GET_NEXT_U8(&b);
14016 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14017#endif
14018 if (rcStrict == VINF_SUCCESS)
14019 pVCpu->iem.s.cInstructions++;
14020 if (pVCpu->iem.s.cActiveMappings > 0)
14021 {
14022 Assert(rcStrict != VINF_SUCCESS);
14023 iemMemRollback(pVCpu);
14024 }
14025 }
14026 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14027 }
14028
14029 /*
14030 * Return value fiddling, statistics and sanity assertions.
14031 */
14032 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14033
14034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14036#if defined(IEM_VERIFICATION_MODE_FULL)
14037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14041#endif
14042 return rcStrict;
14043}
14044
14045
14046#ifdef IN_RC
14047/**
14048 * Re-enters raw-mode or ensures we return to ring-3.
14049 *
14050 * @returns rcStrict, maybe modified.
14051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14052 * @param pCtx The current CPU context.
14053 * @param rcStrict The status code returned by the interpreter.
14054 */
14055DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14056{
14057 if ( !pVCpu->iem.s.fInPatchCode
14058 && ( rcStrict == VINF_SUCCESS
14059 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14060 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14061 {
14062 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14063 CPUMRawEnter(pVCpu);
14064 else
14065 {
14066 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14067 rcStrict = VINF_EM_RESCHEDULE;
14068 }
14069 }
14070 return rcStrict;
14071}
14072#endif
14073
14074
14075/**
14076 * Execute one instruction.
14077 *
14078 * @return Strict VBox status code.
14079 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14080 */
14081VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14082{
14083#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14084 if (++pVCpu->iem.s.cVerifyDepth == 1)
14085 iemExecVerificationModeSetup(pVCpu);
14086#endif
14087#ifdef LOG_ENABLED
14088 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14089 iemLogCurInstr(pVCpu, pCtx, true);
14090#endif
14091
14092 /*
14093 * Do the decoding and emulation.
14094 */
14095 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14096 if (rcStrict == VINF_SUCCESS)
14097 rcStrict = iemExecOneInner(pVCpu, true);
14098
14099#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14100 /*
14101 * Assert some sanity.
14102 */
14103 if (pVCpu->iem.s.cVerifyDepth == 1)
14104 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14105 pVCpu->iem.s.cVerifyDepth--;
14106#endif
14107#ifdef IN_RC
14108 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14109#endif
14110 if (rcStrict != VINF_SUCCESS)
14111 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14112 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14113 return rcStrict;
14114}
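
/*
 * A minimal usage sketch for the entry point above: the wrapper name
 * emR3SampleEmulateOne is hypothetical and only illustrates the calling
 * convention; IEMExecOne is the real interface.
 */
#if 0 /* illustration only, not built */
static VBOXSTRICTRC emR3SampleEmulateOne(PVMCPU pVCpu)
{
    /* Emulates exactly one instruction at the current CS:RIP, including the
       shadowed follow-up instruction when interrupts are inhibited. */
    return IEMExecOne(pVCpu);
}
#endif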
14115
14116
14117VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14118{
14119 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14120 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14121
14122 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14123 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14124 if (rcStrict == VINF_SUCCESS)
14125 {
14126 rcStrict = iemExecOneInner(pVCpu, true);
14127 if (pcbWritten)
14128 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14129 }
14130
14131#ifdef IN_RC
14132 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14133#endif
14134 return rcStrict;
14135}
14136
14137
14138VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14139 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14140{
14141 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14142 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14143
14144 VBOXSTRICTRC rcStrict;
14145 if ( cbOpcodeBytes
14146 && pCtx->rip == OpcodeBytesPC)
14147 {
14148 iemInitDecoder(pVCpu, false);
14149#ifdef IEM_WITH_CODE_TLB
14150 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14151 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14152 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14153 pVCpu->iem.s.offCurInstrStart = 0;
14154 pVCpu->iem.s.offInstrNextByte = 0;
14155#else
14156 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14157 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14158#endif
14159 rcStrict = VINF_SUCCESS;
14160 }
14161 else
14162 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14163 if (rcStrict == VINF_SUCCESS)
14164 {
14165 rcStrict = iemExecOneInner(pVCpu, true);
14166 }
14167
14168#ifdef IN_RC
14169 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14170#endif
14171 return rcStrict;
14172}
14173
14174
14175VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14176{
14177 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14178 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14179
14180 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14181 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14182 if (rcStrict == VINF_SUCCESS)
14183 {
14184 rcStrict = iemExecOneInner(pVCpu, false);
14185 if (pcbWritten)
14186 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14187 }
14188
14189#ifdef IN_RC
14190 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14191#endif
14192 return rcStrict;
14193}
14194
14195
14196VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14197 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14198{
14199 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14200 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14201
14202 VBOXSTRICTRC rcStrict;
14203 if ( cbOpcodeBytes
14204 && pCtx->rip == OpcodeBytesPC)
14205 {
14206 iemInitDecoder(pVCpu, true);
14207#ifdef IEM_WITH_CODE_TLB
14208 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14209 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14210 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14211 pVCpu->iem.s.offCurInstrStart = 0;
14212 pVCpu->iem.s.offInstrNextByte = 0;
14213#else
14214 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14215 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14216#endif
14217 rcStrict = VINF_SUCCESS;
14218 }
14219 else
14220 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14221 if (rcStrict == VINF_SUCCESS)
14222 rcStrict = iemExecOneInner(pVCpu, false);
14223
14224#ifdef IN_RC
14225 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14226#endif
14227 return rcStrict;
14228}
14229
14230
14231/**
14232 * For debugging DISGetParamSize, may come in handy.
14233 *
14234 * @returns Strict VBox status code.
14235 * @param pVCpu The cross context virtual CPU structure of the
14236 * calling EMT.
14237 * @param pCtxCore The context core structure.
14238 * @param OpcodeBytesPC The PC of the opcode bytes.
14239 * @param pvOpcodeBytes Prefetched opcode bytes.
14240 * @param cbOpcodeBytes Number of prefetched bytes.
14241 * @param pcbWritten Where to return the number of bytes written.
14242 * Optional.
14243 */
14244VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14245 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14246 uint32_t *pcbWritten)
14247{
14248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14249 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14250
14251 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14252 VBOXSTRICTRC rcStrict;
14253 if ( cbOpcodeBytes
14254 && pCtx->rip == OpcodeBytesPC)
14255 {
14256 iemInitDecoder(pVCpu, true);
14257#ifdef IEM_WITH_CODE_TLB
14258 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14259 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14260 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14261 pVCpu->iem.s.offCurInstrStart = 0;
14262 pVCpu->iem.s.offInstrNextByte = 0;
14263#else
14264 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14265 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14266#endif
14267 rcStrict = VINF_SUCCESS;
14268 }
14269 else
14270 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14271 if (rcStrict == VINF_SUCCESS)
14272 {
14273 rcStrict = iemExecOneInner(pVCpu, false);
14274 if (pcbWritten)
14275 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14276 }
14277
14278#ifdef IN_RC
14279 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14280#endif
14281 return rcStrict;
14282}
14283
14284
14285VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14286{
14287 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14288
14289#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14290 /*
14291 * See if there is an interrupt pending in TRPM, inject it if we can.
14292 */
14293 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14294# ifdef IEM_VERIFICATION_MODE_FULL
14295 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14296# endif
14297 if ( pCtx->eflags.Bits.u1IF
14298 && TRPMHasTrap(pVCpu)
14299 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14300 {
14301 uint8_t u8TrapNo;
14302 TRPMEVENT enmType;
14303 RTGCUINT uErrCode;
14304 RTGCPTR uCr2;
14305 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14306 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14307 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14308 TRPMResetTrap(pVCpu);
14309 }
14310
14311 /*
14312 * Log the state.
14313 */
14314# ifdef LOG_ENABLED
14315 iemLogCurInstr(pVCpu, pCtx, true);
14316# endif
14317
14318 /*
14319 * Do the decoding and emulation.
14320 */
14321 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14322 if (rcStrict == VINF_SUCCESS)
14323 rcStrict = iemExecOneInner(pVCpu, true);
14324
14325 /*
14326 * Assert some sanity.
14327 */
14328 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14329
14330 /*
14331 * Log and return.
14332 */
14333 if (rcStrict != VINF_SUCCESS)
14334 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14335 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14336 if (pcInstructions)
14337 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14338 return rcStrict;
14339
14340#else /* Not verification mode */
14341
14342 /*
14343 * See if there is an interrupt pending in TRPM, inject it if we can.
14344 */
14345 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14346# ifdef IEM_VERIFICATION_MODE_FULL
14347 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14348# endif
14349 if ( pCtx->eflags.Bits.u1IF
14350 && TRPMHasTrap(pVCpu)
14351 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14352 {
14353 uint8_t u8TrapNo;
14354 TRPMEVENT enmType;
14355 RTGCUINT uErrCode;
14356 RTGCPTR uCr2;
14357 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14358 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14359 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14360 TRPMResetTrap(pVCpu);
14361 }
14362
14363 /*
14364 * Initial decoder init w/ prefetch, then setup setjmp.
14365 */
14366 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14367 if (rcStrict == VINF_SUCCESS)
14368 {
14369# ifdef IEM_WITH_SETJMP
14370 jmp_buf JmpBuf;
14371 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14372 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14373 pVCpu->iem.s.cActiveMappings = 0;
14374 if ((rcStrict = setjmp(JmpBuf)) == 0)
14375# endif
14376 {
14377 /*
14378 * The run loop. We limit ourselves to 4096 instructions right now.
14379 */
14380 PVM pVM = pVCpu->CTX_SUFF(pVM);
14381 uint32_t cInstr = 4096;
14382 for (;;)
14383 {
14384 /*
14385 * Log the state.
14386 */
14387# ifdef LOG_ENABLED
14388 iemLogCurInstr(pVCpu, pCtx, true);
14389# endif
14390
14391 /*
14392 * Do the decoding and emulation.
14393 */
14394 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14395 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14396 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14397 {
14398 Assert(pVCpu->iem.s.cActiveMappings == 0);
14399 pVCpu->iem.s.cInstructions++;
14400 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14401 {
14402 uint32_t fCpu = pVCpu->fLocalForcedActions
14403 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14404 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14405 | VMCPU_FF_TLB_FLUSH
14406# ifdef VBOX_WITH_RAW_MODE
14407 | VMCPU_FF_TRPM_SYNC_IDT
14408 | VMCPU_FF_SELM_SYNC_TSS
14409 | VMCPU_FF_SELM_SYNC_GDT
14410 | VMCPU_FF_SELM_SYNC_LDT
14411# endif
14412 | VMCPU_FF_INHIBIT_INTERRUPTS
14413 | VMCPU_FF_BLOCK_NMIS
14414 | VMCPU_FF_UNHALT ));
14415
14416 if (RT_LIKELY( ( !fCpu
14417 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14418 && !pCtx->rflags.Bits.u1IF) )
14419 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14420 {
14421 if (cInstr-- > 0)
14422 {
14423 Assert(pVCpu->iem.s.cActiveMappings == 0);
14424 iemReInitDecoder(pVCpu);
14425 continue;
14426 }
14427 }
14428 }
14429 Assert(pVCpu->iem.s.cActiveMappings == 0);
14430 }
14431 else if (pVCpu->iem.s.cActiveMappings > 0)
14432 iemMemRollback(pVCpu);
14433 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14434 break;
14435 }
14436 }
14437# ifdef IEM_WITH_SETJMP
14438 else
14439 {
14440 if (pVCpu->iem.s.cActiveMappings > 0)
14441 iemMemRollback(pVCpu);
14442 pVCpu->iem.s.cLongJumps++;
14443 }
14444 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14445# endif
14446
14447 /*
14448 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14449 */
14450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14452# if defined(IEM_VERIFICATION_MODE_FULL)
14453 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14455 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14456 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14457# endif
14458 }
14459
14460 /*
14461 * Maybe re-enter raw-mode and log.
14462 */
14463# ifdef IN_RC
14464 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14465# endif
14466 if (rcStrict != VINF_SUCCESS)
14467 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14468 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14469 if (pcInstructions)
14470 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14471 return rcStrict;
14472#endif /* Not verification mode */
14473}
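
/*
 * A hypothetical caller sketch for IEMExecLots: the wrapper name is made up,
 * and it merely shows the optional instruction counter and the conversion of
 * the strict status for a plain-int caller.
 */
#if 0 /* illustration only, not built */
static int emR3SampleIemLoopStep(PVMCPU pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("emR3SampleIemLoopStep: %u instructions, rcStrict=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return VBOXSTRICTRC_TODO(rcStrict); /* assumes the usual strict-rc conversion macro */
}
#endif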
14474
14475
14476
14477/**
14478 * Injects a trap, fault, abort, software interrupt or external interrupt.
14479 *
14480 * The parameter list matches TRPMQueryTrapAll pretty closely.
14481 *
14482 * @returns Strict VBox status code.
14483 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14484 * @param u8TrapNo The trap number.
14485 * @param enmType What type is it (trap/fault/abort), software
14486 * interrupt or hardware interrupt.
14487 * @param uErrCode The error code if applicable.
14488 * @param uCr2 The CR2 value if applicable.
14489 * @param cbInstr The instruction length (only relevant for
14490 * software interrupts).
14491 */
14492VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14493 uint8_t cbInstr)
14494{
14495 iemInitDecoder(pVCpu, false);
14496#ifdef DBGFTRACE_ENABLED
14497 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14498 u8TrapNo, enmType, uErrCode, uCr2);
14499#endif
14500
14501 uint32_t fFlags;
14502 switch (enmType)
14503 {
14504 case TRPM_HARDWARE_INT:
14505 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14506 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14507 uErrCode = uCr2 = 0;
14508 break;
14509
14510 case TRPM_SOFTWARE_INT:
14511 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14512 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14513 uErrCode = uCr2 = 0;
14514 break;
14515
14516 case TRPM_TRAP:
14517 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14518 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14519 if (u8TrapNo == X86_XCPT_PF)
14520 fFlags |= IEM_XCPT_FLAGS_CR2;
14521 switch (u8TrapNo)
14522 {
14523 case X86_XCPT_DF:
14524 case X86_XCPT_TS:
14525 case X86_XCPT_NP:
14526 case X86_XCPT_SS:
14527 case X86_XCPT_PF:
14528 case X86_XCPT_AC:
14529 fFlags |= IEM_XCPT_FLAGS_ERR;
14530 break;
14531
14532 case X86_XCPT_NMI:
14533 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14534 break;
14535 }
14536 break;
14537
14538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14539 }
14540
14541 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14542}
14543
14544
14545/**
14546 * Injects the active TRPM event.
14547 *
14548 * @returns Strict VBox status code.
14549 * @param pVCpu The cross context virtual CPU structure.
14550 */
14551VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14552{
14553#ifndef IEM_IMPLEMENTS_TASKSWITCH
14554 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14555#else
14556 uint8_t u8TrapNo;
14557 TRPMEVENT enmType;
14558 RTGCUINT uErrCode;
14559 RTGCUINTPTR uCr2;
14560 uint8_t cbInstr;
14561 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14562 if (RT_FAILURE(rc))
14563 return rc;
14564
14565 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14566
14567 /** @todo Are there any other codes that imply the event was successfully
14568 * delivered to the guest? See @bugref{6607}. */
14569 if ( rcStrict == VINF_SUCCESS
14570 || rcStrict == VINF_IEM_RAISED_XCPT)
14571 {
14572 TRPMResetTrap(pVCpu);
14573 }
14574 return rcStrict;
14575#endif
14576}
14577
14578
14579VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14580{
14581 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14582 return VERR_NOT_IMPLEMENTED;
14583}
14584
14585
14586VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14587{
14588 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14589 return VERR_NOT_IMPLEMENTED;
14590}
14591
14592
14593#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14594/**
14595 * Executes an IRET instruction with default operand size.
14596 *
14597 * This is for PATM.
14598 *
14599 * @returns VBox status code.
14600 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14601 * @param pCtxCore The register frame.
14602 */
14603VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14604{
14605 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14606
14607 iemCtxCoreToCtx(pCtx, pCtxCore);
14608 iemInitDecoder(pVCpu);
14609 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14610 if (rcStrict == VINF_SUCCESS)
14611 iemCtxToCtxCore(pCtxCore, pCtx);
14612 else
14613 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14614 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14615 return rcStrict;
14616}
14617#endif
14618
14619
14620/**
14621 * Macro used by the IEMExec* method to check the given instruction length.
14622 *
14623 * Will return on failure!
14624 *
14625 * @param a_cbInstr The given instruction length.
14626 * @param a_cbMin The minimum length.
14627 */
14628#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14629 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14630 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14631
14632
14633/**
14634 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14635 *
14636 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14637 *
14638 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
14639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14640 * @param rcStrict The status code to fiddle.
14641 */
14642DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14643{
14644 iemUninitExec(pVCpu);
14645#ifdef IN_RC
14646 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14647 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14648#else
14649 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14650#endif
14651}
14652
14653
14654/**
14655 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14656 *
14657 * This API ASSUMES that the caller has already verified that the guest code is
14658 * allowed to access the I/O port. (The I/O port is in the DX register in the
14659 * guest state.)
14660 *
14661 * @returns Strict VBox status code.
14662 * @param pVCpu The cross context virtual CPU structure.
14663 * @param cbValue The size of the I/O port access (1, 2, or 4).
14664 * @param enmAddrMode The addressing mode.
14665 * @param fRepPrefix Indicates whether a repeat prefix is used
14666 * (doesn't matter which for this instruction).
14667 * @param cbInstr The instruction length in bytes.
14668 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
14669 * @param fIoChecked Whether the access to the I/O port has been
14670 * checked or not. It's typically checked in the
14671 * HM scenario.
14672 */
14673VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14674 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14675{
14676 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14678
14679 /*
14680 * State init.
14681 */
14682 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14683
14684 /*
14685 * Switch orgy for getting to the right handler.
14686 */
14687 VBOXSTRICTRC rcStrict;
14688 if (fRepPrefix)
14689 {
14690 switch (enmAddrMode)
14691 {
14692 case IEMMODE_16BIT:
14693 switch (cbValue)
14694 {
14695 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14696 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14697 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14698 default:
14699 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14700 }
14701 break;
14702
14703 case IEMMODE_32BIT:
14704 switch (cbValue)
14705 {
14706 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14707 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14708 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14709 default:
14710 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14711 }
14712 break;
14713
14714 case IEMMODE_64BIT:
14715 switch (cbValue)
14716 {
14717 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14718 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14719 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14720 default:
14721 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14722 }
14723 break;
14724
14725 default:
14726 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14727 }
14728 }
14729 else
14730 {
14731 switch (enmAddrMode)
14732 {
14733 case IEMMODE_16BIT:
14734 switch (cbValue)
14735 {
14736 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14737 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14738 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14739 default:
14740 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14741 }
14742 break;
14743
14744 case IEMMODE_32BIT:
14745 switch (cbValue)
14746 {
14747 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14748 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14749 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14750 default:
14751 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14752 }
14753 break;
14754
14755 case IEMMODE_64BIT:
14756 switch (cbValue)
14757 {
14758 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14759 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14760 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14761 default:
14762 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14763 }
14764 break;
14765
14766 default:
14767 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14768 }
14769 }
14770
14771 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14772}
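
/*
 * A hypothetical call sketch for the string I/O write interface above: it asks
 * IEM to emulate "rep outsb" with 16-bit addressing. The wrapper name and the
 * hardcoded 2-byte instruction length (F3 6E) are assumptions for illustration.
 */
#if 0 /* illustration only, not built */
static VBOXSTRICTRC hmR0SampleRepOutsb(PVMCPU pVCpu)
{
    /* DS:SI is the default source for OUTS; fIoChecked=false makes IEM perform
       the TSS I/O bitmap check itself. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/, false /*fIoChecked*/);
}
#endif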
14773
14774
14775/**
14776 * Interface for HM and EM for executing string I/O IN (read) instructions.
14777 *
14778 * This API ASSUMES that the caller has already verified that the guest code is
14779 * allowed to access the I/O port. (The I/O port is in the DX register in the
14780 * guest state.)
14781 *
14782 * @returns Strict VBox status code.
14783 * @param pVCpu The cross context virtual CPU structure.
14784 * @param cbValue The size of the I/O port access (1, 2, or 4).
14785 * @param enmAddrMode The addressing mode.
14786 * @param fRepPrefix Indicates whether a repeat prefix is used
14787 * (doesn't matter which for this instruction).
14788 * @param cbInstr The instruction length in bytes.
14789 * @param fIoChecked Whether the access to the I/O port has been
14790 * checked or not. It's typically checked in the
14791 * HM scenario.
14792 */
14793VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14794 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14795{
14796 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14797
14798 /*
14799 * State init.
14800 */
14801 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14802
14803 /*
14804 * Switch orgy for getting to the right handler.
14805 */
14806 VBOXSTRICTRC rcStrict;
14807 if (fRepPrefix)
14808 {
14809 switch (enmAddrMode)
14810 {
14811 case IEMMODE_16BIT:
14812 switch (cbValue)
14813 {
14814 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14815 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14816 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14817 default:
14818 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14819 }
14820 break;
14821
14822 case IEMMODE_32BIT:
14823 switch (cbValue)
14824 {
14825 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14826 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14827 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14828 default:
14829 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14830 }
14831 break;
14832
14833 case IEMMODE_64BIT:
14834 switch (cbValue)
14835 {
14836 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14837 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14838 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14839 default:
14840 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14841 }
14842 break;
14843
14844 default:
14845 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14846 }
14847 }
14848 else
14849 {
14850 switch (enmAddrMode)
14851 {
14852 case IEMMODE_16BIT:
14853 switch (cbValue)
14854 {
14855 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14856 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14857 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14858 default:
14859 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14860 }
14861 break;
14862
14863 case IEMMODE_32BIT:
14864 switch (cbValue)
14865 {
14866 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14867 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14868 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14869 default:
14870 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14871 }
14872 break;
14873
14874 case IEMMODE_64BIT:
14875 switch (cbValue)
14876 {
14877 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14878 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14879 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14880 default:
14881 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14882 }
14883 break;
14884
14885 default:
14886 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14887 }
14888 }
14889
14890 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14891}
14892
14893
14894/**
14895 * Interface for raw-mode to execute an OUT instruction.
14896 *
14897 * @returns Strict VBox status code.
14898 * @param pVCpu The cross context virtual CPU structure.
14899 * @param cbInstr The instruction length in bytes.
14900 * @param u16Port The port to write to.
14901 * @param cbReg The register size.
14902 *
14903 * @remarks In ring-0 not all of the state needs to be synced in.
14904 */
14905VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14906{
14907 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14908 Assert(cbReg <= 4 && cbReg != 3);
14909
14910 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14911 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14912 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14913}
14914
14915
14916/**
14917 * Interface for raw-mode to execute an IN instruction.
14918 *
14919 * @returns Strict VBox status code.
14920 * @param pVCpu The cross context virtual CPU structure.
14921 * @param cbInstr The instruction length in bytes.
14922 * @param u16Port The port to read.
14923 * @param cbReg The register size.
14924 */
14925VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14926{
14927 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14928 Assert(cbReg <= 4 && cbReg != 3);
14929
14930 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14931 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14932 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14933}
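
/*
 * A hypothetical usage sketch for the two decoded port I/O interfaces above;
 * the wrapper name, ports and 1-byte lengths (EE / EC encodings with DX holding
 * the port) are illustrative assumptions only.
 */
#if 0 /* illustration only, not built */
static VBOXSTRICTRC hmR0SamplePortIo(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, 0x60 /*u16Port*/, 1 /*cbReg*/);
    return rcStrict;
}
#endif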
14934
14935
14936/**
14937 * Interface for HM and EM to write to a CRx register.
14938 *
14939 * @returns Strict VBox status code.
14940 * @param pVCpu The cross context virtual CPU structure.
14941 * @param cbInstr The instruction length in bytes.
14942 * @param iCrReg The control register number (destination).
14943 * @param iGReg The general purpose register number (source).
14944 *
14945 * @remarks In ring-0 not all of the state needs to be synced in.
14946 */
14947VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14948{
14949 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14950 Assert(iCrReg < 16);
14951 Assert(iGReg < 16);
14952
14953 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14954 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14955 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14956}
14957
14958
14959/**
14960 * Interface for HM and EM to read from a CRx register.
14961 *
14962 * @returns Strict VBox status code.
14963 * @param pVCpu The cross context virtual CPU structure.
14964 * @param cbInstr The instruction length in bytes.
14965 * @param iGReg The general purpose register number (destination).
14966 * @param iCrReg The control register number (source).
14967 *
14968 * @remarks In ring-0 not all of the state needs to be synced in.
14969 */
14970VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14971{
14972 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14973 Assert(iCrReg < 16);
14974 Assert(iGReg < 16);
14975
14976 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14977 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14978 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14979}
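
/*
 * A hypothetical sketch for the CRx interfaces above. The 3-byte length matches
 * the 0F 22 /r and 0F 20 /r encodings; the wrapper name and register choices
 * are made up for illustration.
 */
#if 0 /* illustration only, not built */
static VBOXSTRICTRC hmR0SampleMovCr3(PVMCPU pVCpu)
{
    /* Emulate "mov cr3, rax", then read CR3 back into rcx. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, X86_GREG_xAX /*iGReg*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, X86_GREG_xCX /*iGReg*/, 3 /*iCrReg*/);
    return rcStrict;
}
#endif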
14980
14981
14982/**
14983 * Interface for HM and EM to clear the CR0[TS] bit.
14984 *
14985 * @returns Strict VBox status code.
14986 * @param pVCpu The cross context virtual CPU structure.
14987 * @param cbInstr The instruction length in bytes.
14988 *
14989 * @remarks In ring-0 not all of the state needs to be synced in.
14990 */
14991VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14992{
14993 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14994
14995 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14996 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14997 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14998}
14999
15000
15001/**
15002 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15003 *
15004 * @returns Strict VBox status code.
15005 * @param pVCpu The cross context virtual CPU structure.
15006 * @param cbInstr The instruction length in bytes.
15007 * @param uValue The value to load into CR0.
15008 *
15009 * @remarks In ring-0 not all of the state needs to be synced in.
15010 */
15011VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15012{
15013 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15014
15015 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15016 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15017 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15018}
15019
15020
15021/**
15022 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15023 *
15024 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15025 *
15026 * @returns Strict VBox status code.
15027 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15028 * @param cbInstr The instruction length in bytes.
15029 * @remarks In ring-0 not all of the state needs to be synced in.
15030 * @thread EMT(pVCpu)
15031 */
15032VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15033{
15034 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15035
15036 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15037 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15038 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15039}
15040
15041
15042#ifdef VBOX_WITH_NESTED_HWVIRT
15043/**
15044 * Checks if IEM is in the process of delivering an event (interrupt or
15045 * exception).
15046 *
15047 * @returns true if it's raising an interrupt or exception, false otherwise.
15048 * @param pVCpu The cross context virtual CPU structure.
15049 */
15050VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
15051{
15052 return pVCpu->iem.s.cXcptRecursions > 0;
15053}
15054
15055
15056/**
15057 * Interface for HM and EM to emulate the CLGI instruction.
15058 *
15059 * @returns Strict VBox status code.
15060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15061 * @param cbInstr The instruction length in bytes.
15062 * @thread EMT(pVCpu)
15063 */
15064VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15065{
15066 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15067
15068 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15069 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15070 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15071}
15072
15073
15074/**
15075 * Interface for HM and EM to emulate the STGI instruction.
15076 *
15077 * @returns Strict VBox status code.
15078 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15079 * @param cbInstr The instruction length in bytes.
15080 * @thread EMT(pVCpu)
15081 */
15082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15083{
15084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15085
15086 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15087 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15088 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15089}
15090
15091
15092/**
15093 * Interface for HM and EM to emulate the VMLOAD instruction.
15094 *
15095 * @returns Strict VBox status code.
15096 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15097 * @param cbInstr The instruction length in bytes.
15098 * @thread EMT(pVCpu)
15099 */
15100VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15101{
15102 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15103
15104 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15105 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15106 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15107}
15108
15109
15110/**
15111 * Interface for HM and EM to emulate the VMSAVE instruction.
15112 *
15113 * @returns Strict VBox status code.
15114 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15115 * @param cbInstr The instruction length in bytes.
15116 * @thread EMT(pVCpu)
15117 */
15118VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15119{
15120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15121
15122 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15123 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15124 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15125}
15126
15127
15128/**
15129 * Interface for HM and EM to emulate the INVLPGA instruction.
15130 *
15131 * @returns Strict VBox status code.
15132 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15133 * @param cbInstr The instruction length in bytes.
15134 * @thread EMT(pVCpu)
15135 */
15136VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15137{
15138 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15139
15140 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15141 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15142 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15143}
15144#endif /* VBOX_WITH_NESTED_HWVIRT */
15145
15146#ifdef IN_RING3
15147
15148/**
15149 * Handles the unlikely and probably fatal merge cases.
15150 *
15151 * @returns Merged status code.
15152 * @param rcStrict Current EM status code.
15153 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15154 * with @a rcStrict.
15155 * @param iMemMap The memory mapping index. For error reporting only.
15156 * @param pVCpu The cross context virtual CPU structure of the calling
15157 * thread, for error reporting only.
15158 */
15159DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15160 unsigned iMemMap, PVMCPU pVCpu)
15161{
15162 if (RT_FAILURE_NP(rcStrict))
15163 return rcStrict;
15164
15165 if (RT_FAILURE_NP(rcStrictCommit))
15166 return rcStrictCommit;
15167
15168 if (rcStrict == rcStrictCommit)
15169 return rcStrictCommit;
15170
15171 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15172 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15173 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15174 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15175 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15176 return VERR_IOM_FF_STATUS_IPE;
15177}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely: take the out-of-line slow path. */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
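
/*
 * Merge precedence sketch for the fast path above; the value pairs are
 * illustrative, only iemR3MergeStatus() itself comes from this file:
 *
 * @code
 *     // VINF_SUCCESS or VINF_EM_RAW_TO_R3 on the EM side yields the commit status:
 *     //   iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_EM_RESCHEDULE, 0, pVCpu) -> VINF_EM_RESCHEDULE
 *     // VINF_SUCCESS on the commit side yields the EM status:
 *     //   iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_SUCCESS, 0, pVCpu) -> VINF_EM_RESCHEDULE
 *     // Two EM scheduling statuses: the numerically smaller value wins, which
 *     // by convention is the more important scheduling request.
 *     // Everything else is deferred to iemR3MergeStatusSlow().
 * @endcode
 */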


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(   (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
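
/*
 * Illustrative ring-3 call site; the surrounding force-flag processing loop is
 * an assumption made for the example, only IEMR3ProcessForceFlag() and the
 * VMCPU_FF_IEM force flag come from this file:
 *
 * @code
 *     // Hypothetical fragment of an EM ring-3 force-flag loop: if IEM left a
 *     // bounce-buffer write pending, commit it here and fold the resulting
 *     // status into the code that came back from ring-0/raw-mode.
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */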

#endif /* IN_RING3 */
