VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 66357

Last change on this file since 66357 was 66357, checked in by vboxsync, 8 years ago

VMM: Nested Hw.virt: ifdef.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 588.8 KB
1/* $Id: IEMAll.cpp 66357 2017-03-30 11:01:33Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
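/*
 * Illustrative sketch (disabled, not built): how the log levels listed above
 * are typically exercised.  The message texts and the uEip/GCPtrDst/cbDst
 * variables are invented for the example; only the Log* macros and their
 * double-parenthesis syntax come from VBox/log.h.
 */
#if 0
    LogFlow(("iemExecSketch: enter\n"));                 /* Flow  : basic enter/exit state info.   */
    Log4(("decode: mov eax, imm32 @ %08x\n", uEip));     /* Level4: decoded mnemonic w/ EIP.       */
    Log(("iemExecSketch: raising #GP(0)\n"));            /* Level1: exceptions and major events.   */
    Log8(("write: %RGv LB %#x\n", GCPtrDst, cbDst));     /* Level8: memory writes.                 */
#endif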
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218
219/*********************************************************************************************************************************
220* Defined Constants And Macros *
221*********************************************************************************************************************************/
222/** @def IEM_WITH_SETJMP
223 * Enables alternative status code handling using setjmps.
224 *
225 * This adds a bit of expense via the setjmp() call since it saves all the
226 * non-volatile registers. However, it eliminates return code checks and allows
227 * for more optimal return value passing (return regs instead of stack buffer).
228 */
229#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
230# define IEM_WITH_SETJMP
231#endif
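/*
 * Illustrative sketch (disabled, not built): what IEM_WITH_SETJMP changes for
 * a caller of the opcode fetch helpers.  The helper names follow the
 * iemOpcodeGetNextU32 / iemOpcodeGetNextU32Jmp pattern used further down in
 * this file; the surrounding statements are invented for the example.
 */
#if 0
# ifndef IEM_WITH_SETJMP
    uint32_t u32Disp;
    VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, &u32Disp); /* status must be checked and propagated */
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
# else
    uint32_t const u32Disp = iemOpcodeGetNextU32Jmp(pVCpu);        /* longjmps to the setjmp frame on failure */
# endif
#endif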
232
233/** Temporary hack to disable the double execution. Will be removed in favor
234 * of a dedicated execution mode in EM. */
235//#define IEM_VERIFICATION_MODE_NO_REM
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
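/*
 * Illustrative sketch (disabled, not built): typical use of the macro above in
 * a switch over the effective operand size, keeping GCC quiet about the
 * "impossible" default path.  The cbValue local is invented for the example.
 */
#if 0
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: cbValue = 2; break;
        case IEMMODE_32BIT: cbValue = 4; break;
        case IEMMODE_64BIT: cbValue = 8; break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
#endif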
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
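/*
 * Illustrative sketch (disabled, not built): bailing out of a partially
 * implemented code path with the macro above.  The condition and values are
 * invented for the example; note the double parentheses around the logger
 * arguments.
 */
#if 0
    if (fWeirdCornerCase)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("corner case %#x not handled\n", uValue));
#endif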
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
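/*
 * Illustrative sketch (disabled, not built): defining decoder functions with
 * FNIEMOP_DEF / FNIEMOPRM_DEF and dispatching with FNIEMOP_CALL_1, as the
 * comments above describe.  The handler names and their behaviour are invented
 * for the example; IEM_OPCODE_GET_NEXT_U8 is the opcode fetch macro defined
 * further down in this file.
 */
#if 0
FNIEMOPRM_DEF(iemOp_sketch_rm)
{
    /* Only the register form is "implemented" in this sketch. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return VINF_SUCCESS;
    return iemRaiseUndefinedOpcode(pVCpu);
}

FNIEMOP_DEF(iemOp_sketch)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    return FNIEMOP_CALL_1(iemOp_sketch_rm, bRm);
}
#endif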
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/** @def IEM_USE_UNALIGNED_DATA_ACCESS
363 * Use unaligned accesses instead of elaborate byte assembly. */
364#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
365# define IEM_USE_UNALIGNED_DATA_ACCESS
366#endif
367
368#ifdef VBOX_WITH_NESTED_HWVIRT
369/**
370 * Check the common SVM instruction preconditions.
371 */
372#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
373 do { \
374 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
375 { \
376 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
377 return iemRaiseUndefinedOpcode(pVCpu); \
378 } \
379 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
380 { \
381 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
382 return iemRaiseUndefinedOpcode(pVCpu); \
383 } \
384 if (pVCpu->iem.s.uCpl != 0) \
385 { \
386 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
387 return iemRaiseGeneralProtectionFault0(pVCpu); \
388 } \
389 } while (0)
390
391/**
392 * Check if SVM is enabled.
393 */
394#define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if an SVM control/instruction intercept is set.
398 */
399#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
400
401/**
402 * Check if an SVM read CRx intercept is set.
403 */
404#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
405
406/**
407 * Check if an SVM write CRx intercept is set.
408 */
409#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
410
411/**
412 * Check if an SVM read DRx intercept is set.
413 */
414#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
415
416/**
417 * Check if an SVM write DRx intercept is set.
418 */
419#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
420
421/**
422 * Check if an SVM exception intercept is set.
423 */
424#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
425#endif /* VBOX_WITH_NESTED_HWVIRT */
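/*
 * Illustrative sketch (disabled, not built): how the SVM helpers above are
 * meant to be used at the top of an SVM instruction implementation.  The
 * instruction choice (VMMCALL) and the body are invented for the example; the
 * SVM_CTRL_INTERCEPT_VMMCALL constant is assumed to come from hm_svm.h.
 */
#if 0
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmmcall);                   /* EFER.SVME, mode and CPL checks. */
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    {
        Log(("vmmcall: intercept set -> #VMEXIT\n"));
        /* ... raise the nested-guest #VMEXIT here ... */
    }
#endif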
426
427
428/*********************************************************************************************************************************
429* Global Variables *
430*********************************************************************************************************************************/
431extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
432
433
434/** Function table for the ADD instruction. */
435IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
436{
437 iemAImpl_add_u8, iemAImpl_add_u8_locked,
438 iemAImpl_add_u16, iemAImpl_add_u16_locked,
439 iemAImpl_add_u32, iemAImpl_add_u32_locked,
440 iemAImpl_add_u64, iemAImpl_add_u64_locked
441};
442
443/** Function table for the ADC instruction. */
444IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
445{
446 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
447 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
448 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
449 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
450};
451
452/** Function table for the SUB instruction. */
453IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
454{
455 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
456 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
457 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
458 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
459};
460
461/** Function table for the SBB instruction. */
462IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
463{
464 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
465 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
466 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
467 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
468};
469
470/** Function table for the OR instruction. */
471IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
472{
473 iemAImpl_or_u8, iemAImpl_or_u8_locked,
474 iemAImpl_or_u16, iemAImpl_or_u16_locked,
475 iemAImpl_or_u32, iemAImpl_or_u32_locked,
476 iemAImpl_or_u64, iemAImpl_or_u64_locked
477};
478
479/** Function table for the XOR instruction. */
480IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
481{
482 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
483 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
484 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
485 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
486};
487
488/** Function table for the AND instruction. */
489IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
490{
491 iemAImpl_and_u8, iemAImpl_and_u8_locked,
492 iemAImpl_and_u16, iemAImpl_and_u16_locked,
493 iemAImpl_and_u32, iemAImpl_and_u32_locked,
494 iemAImpl_and_u64, iemAImpl_and_u64_locked
495};
496
497/** Function table for the CMP instruction.
498 * @remarks Making operand order ASSUMPTIONS.
499 */
500IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
501{
502 iemAImpl_cmp_u8, NULL,
503 iemAImpl_cmp_u16, NULL,
504 iemAImpl_cmp_u32, NULL,
505 iemAImpl_cmp_u64, NULL
506};
507
508/** Function table for the TEST instruction.
509 * @remarks Making operand order ASSUMPTIONS.
510 */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
512{
513 iemAImpl_test_u8, NULL,
514 iemAImpl_test_u16, NULL,
515 iemAImpl_test_u32, NULL,
516 iemAImpl_test_u64, NULL
517};
518
519/** Function table for the BT instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
521{
522 NULL, NULL,
523 iemAImpl_bt_u16, NULL,
524 iemAImpl_bt_u32, NULL,
525 iemAImpl_bt_u64, NULL
526};
527
528/** Function table for the BTC instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
530{
531 NULL, NULL,
532 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
533 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
534 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
535};
536
537/** Function table for the BTR instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
539{
540 NULL, NULL,
541 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
542 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
543 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
544};
545
546/** Function table for the BTS instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
548{
549 NULL, NULL,
550 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
551 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
552 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
553};
554
555/** Function table for the BSF instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
557{
558 NULL, NULL,
559 iemAImpl_bsf_u16, NULL,
560 iemAImpl_bsf_u32, NULL,
561 iemAImpl_bsf_u64, NULL
562};
563
564/** Function table for the BSR instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
566{
567 NULL, NULL,
568 iemAImpl_bsr_u16, NULL,
569 iemAImpl_bsr_u32, NULL,
570 iemAImpl_bsr_u64, NULL
571};
572
573/** Function table for the IMUL instruction. */
574IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
575{
576 NULL, NULL,
577 iemAImpl_imul_two_u16, NULL,
578 iemAImpl_imul_two_u32, NULL,
579 iemAImpl_imul_two_u64, NULL
580};
581
582/** Group 1 /r lookup table. */
583IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
584{
585 &g_iemAImpl_add,
586 &g_iemAImpl_or,
587 &g_iemAImpl_adc,
588 &g_iemAImpl_sbb,
589 &g_iemAImpl_and,
590 &g_iemAImpl_sub,
591 &g_iemAImpl_xor,
592 &g_iemAImpl_cmp
593};
594
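/*
 * Illustrative sketch (disabled, not built): how g_apIemImplGrp1 is indexed by
 * the ModR/M reg field for the 0x80..0x83 group 1 opcodes.  The fetch and the
 * surrounding code are simplified and invented for the example.
 */
#if 0
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
    /* pImpl now points at the add/or/adc/sbb/and/sub/xor/cmp worker table. */
#endif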
595/** Function table for the INC instruction. */
596IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
597{
598 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
599 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
600 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
601 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
602};
603
604/** Function table for the DEC instruction. */
605IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
606{
607 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
608 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
609 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
610 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
611};
612
613/** Function table for the NEG instruction. */
614IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
615{
616 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
617 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
618 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
619 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
620};
621
622/** Function table for the NOT instruction. */
623IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
624{
625 iemAImpl_not_u8, iemAImpl_not_u8_locked,
626 iemAImpl_not_u16, iemAImpl_not_u16_locked,
627 iemAImpl_not_u32, iemAImpl_not_u32_locked,
628 iemAImpl_not_u64, iemAImpl_not_u64_locked
629};
630
631
632/** Function table for the ROL instruction. */
633IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
634{
635 iemAImpl_rol_u8,
636 iemAImpl_rol_u16,
637 iemAImpl_rol_u32,
638 iemAImpl_rol_u64
639};
640
641/** Function table for the ROR instruction. */
642IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
643{
644 iemAImpl_ror_u8,
645 iemAImpl_ror_u16,
646 iemAImpl_ror_u32,
647 iemAImpl_ror_u64
648};
649
650/** Function table for the RCL instruction. */
651IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
652{
653 iemAImpl_rcl_u8,
654 iemAImpl_rcl_u16,
655 iemAImpl_rcl_u32,
656 iemAImpl_rcl_u64
657};
658
659/** Function table for the RCR instruction. */
660IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
661{
662 iemAImpl_rcr_u8,
663 iemAImpl_rcr_u16,
664 iemAImpl_rcr_u32,
665 iemAImpl_rcr_u64
666};
667
668/** Function table for the SHL instruction. */
669IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
670{
671 iemAImpl_shl_u8,
672 iemAImpl_shl_u16,
673 iemAImpl_shl_u32,
674 iemAImpl_shl_u64
675};
676
677/** Function table for the SHR instruction. */
678IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
679{
680 iemAImpl_shr_u8,
681 iemAImpl_shr_u16,
682 iemAImpl_shr_u32,
683 iemAImpl_shr_u64
684};
685
686/** Function table for the SAR instruction. */
687IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
688{
689 iemAImpl_sar_u8,
690 iemAImpl_sar_u16,
691 iemAImpl_sar_u32,
692 iemAImpl_sar_u64
693};
694
695
696/** Function table for the MUL instruction. */
697IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
698{
699 iemAImpl_mul_u8,
700 iemAImpl_mul_u16,
701 iemAImpl_mul_u32,
702 iemAImpl_mul_u64
703};
704
705/** Function table for the IMUL instruction working implicitly on rAX. */
706IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
707{
708 iemAImpl_imul_u8,
709 iemAImpl_imul_u16,
710 iemAImpl_imul_u32,
711 iemAImpl_imul_u64
712};
713
714/** Function table for the DIV instruction. */
715IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
716{
717 iemAImpl_div_u8,
718 iemAImpl_div_u16,
719 iemAImpl_div_u32,
720 iemAImpl_div_u64
721};
722
723/** Function table for the IDIV instruction. */
724IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
725{
726 iemAImpl_idiv_u8,
727 iemAImpl_idiv_u16,
728 iemAImpl_idiv_u32,
729 iemAImpl_idiv_u64
730};
731
732/** Function table for the SHLD instruction */
733IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
734{
735 iemAImpl_shld_u16,
736 iemAImpl_shld_u32,
737 iemAImpl_shld_u64,
738};
739
740/** Function table for the SHRD instruction */
741IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
742{
743 iemAImpl_shrd_u16,
744 iemAImpl_shrd_u32,
745 iemAImpl_shrd_u64,
746};
747
748
749/** Function table for the PUNPCKLBW instruction */
750IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
751/** Function table for the PUNPCKLWD instruction */
752IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
753/** Function table for the PUNPCKLDQ instruction */
754IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
755/** Function table for the PUNPCKLQDQ instruction */
756IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
757
758/** Function table for the PUNPCKHBW instruction */
759IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
760/** Function table for the PUNPCKHWD instruction */
761IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
762/** Function table for the PUNPCKHDQ instruction */
763IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
764/** Function table for the PUNPCKHQDQ instruction */
765IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
766
767/** Function table for the PXOR instruction */
768IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
769/** Function table for the PCMPEQB instruction */
770IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
771/** Function table for the PCMPEQW instruction */
772IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
773/** Function table for the PCMPEQD instruction */
774IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
775
776
777#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
778/** What IEM just wrote. */
779uint8_t g_abIemWrote[256];
780/** How much IEM just wrote. */
781size_t g_cbIemWrote;
782#endif
783
784
785/*********************************************************************************************************************************
786* Internal Functions *
787*********************************************************************************************************************************/
788IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
789IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
790IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
791IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
792/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
793IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
794IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
795IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
796IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
797IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
798IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
799IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
800IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
801IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
802IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
803IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
804IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
805#ifdef IEM_WITH_SETJMP
806DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
807DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
808DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
809DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
810DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
811#endif
812
813IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
814IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
815IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
816IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
817IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
818IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
820IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
821IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
822IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
823IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
824IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
825IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
826IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
827IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
828IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
829
830#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
831IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
832#endif
833IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
834IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
835
836
837
838/**
839 * Sets the pass up status.
840 *
841 * @returns VINF_SUCCESS.
842 * @param pVCpu The cross context virtual CPU structure of the
843 * calling thread.
844 * @param rcPassUp The pass up status. Must be informational.
845 * VINF_SUCCESS is not allowed.
846 */
847IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
848{
849 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
850
851 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
852 if (rcOldPassUp == VINF_SUCCESS)
853 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
854 /* If both are EM scheduling codes, use EM priority rules. */
855 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
856 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
857 {
858 if (rcPassUp < rcOldPassUp)
859 {
860 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
861 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
862 }
863 else
864 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
865 }
866 /* Override EM scheduling with specific status code. */
867 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
868 {
869 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
870 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
871 }
872 /* Don't override specific status code, first come first served. */
873 else
874 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
875 return VINF_SUCCESS;
876}
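/*
 * Illustrative sketch (disabled, not built): the typical call-site pattern for
 * iemSetPassUpStatus after a partially successful physical access, mirroring
 * the real use in iemInitDecoderAndPrefetchOpcodes further down.
 */
#if 0
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    { /* likely */ }
    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); /* remember the informational status for the caller */
    else
        return rcStrict;
#endif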
877
878
879/**
880 * Calculates the CPU mode.
881 *
882 * This is mainly for updating IEMCPU::enmCpuMode.
883 *
884 * @returns CPU mode.
885 * @param pCtx The register context for the CPU.
886 */
887DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
888{
889 if (CPUMIsGuestIn64BitCodeEx(pCtx))
890 return IEMMODE_64BIT;
891 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
892 return IEMMODE_32BIT;
893 return IEMMODE_16BIT;
894}
895
896
897/**
898 * Initializes the execution state.
899 *
900 * @param pVCpu The cross context virtual CPU structure of the
901 * calling thread.
902 * @param fBypassHandlers Whether to bypass access handlers.
903 *
904 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
905 * side-effects in strict builds.
906 */
907DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
908{
909 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
910
911 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
912
913#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
915 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
917 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
918 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
920 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
921 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
922#endif
923
924#ifdef VBOX_WITH_RAW_MODE_NOT_R0
925 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
926#endif
927 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
928 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
929#ifdef VBOX_STRICT
930 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
931 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
932 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
933 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
934 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
935 pVCpu->iem.s.uRexReg = 127;
936 pVCpu->iem.s.uRexB = 127;
937 pVCpu->iem.s.uRexIndex = 127;
938 pVCpu->iem.s.iEffSeg = 127;
939 pVCpu->iem.s.idxPrefix = 127;
940 pVCpu->iem.s.uVex3rdReg = 127;
941 pVCpu->iem.s.uVexLength = 127;
942 pVCpu->iem.s.fEvexStuff = 127;
943 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
944# ifdef IEM_WITH_CODE_TLB
945 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
946 pVCpu->iem.s.pbInstrBuf = NULL;
947 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
948 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
949 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
950 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
951# else
952 pVCpu->iem.s.offOpcode = 127;
953 pVCpu->iem.s.cbOpcode = 127;
954# endif
955#endif
956
957 pVCpu->iem.s.cActiveMappings = 0;
958 pVCpu->iem.s.iNextMapping = 0;
959 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
960 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
963 && pCtx->cs.u64Base == 0
964 && pCtx->cs.u32Limit == UINT32_MAX
965 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
966 if (!pVCpu->iem.s.fInPatchCode)
967 CPUMRawLeave(pVCpu, VINF_SUCCESS);
968#endif
969
970#ifdef IEM_VERIFICATION_MODE_FULL
971 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
972 pVCpu->iem.s.fNoRem = true;
973#endif
974}
975
976
977/**
978 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
979 *
980 * @param pVCpu The cross context virtual CPU structure of the
981 * calling thread.
982 */
983DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
984{
985 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
986#ifdef IEM_VERIFICATION_MODE_FULL
987 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
988#endif
989#ifdef VBOX_STRICT
990# ifdef IEM_WITH_CODE_TLB
991 NOREF(pVCpu);
992# else
993 pVCpu->iem.s.cbOpcode = 0;
994# endif
995#else
996 NOREF(pVCpu);
997#endif
998}
999
1000
1001/**
1002 * Initializes the decoder state.
1003 *
1004 * iemReInitDecoder is mostly a copy of this function.
1005 *
1006 * @param pVCpu The cross context virtual CPU structure of the
1007 * calling thread.
1008 * @param fBypassHandlers Whether to bypass access handlers.
1009 */
1010DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1011{
1012 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1013
1014 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1015
1016#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1025#endif
1026
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1029#endif
1030 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1031#ifdef IEM_VERIFICATION_MODE_FULL
1032 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1033 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1034#endif
1035 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1036 pVCpu->iem.s.enmCpuMode = enmMode;
1037 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1038 pVCpu->iem.s.enmEffAddrMode = enmMode;
1039 if (enmMode != IEMMODE_64BIT)
1040 {
1041 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1042 pVCpu->iem.s.enmEffOpSize = enmMode;
1043 }
1044 else
1045 {
1046 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1047 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1048 }
1049 pVCpu->iem.s.fPrefixes = 0;
1050 pVCpu->iem.s.uRexReg = 0;
1051 pVCpu->iem.s.uRexB = 0;
1052 pVCpu->iem.s.uRexIndex = 0;
1053 pVCpu->iem.s.idxPrefix = 0;
1054 pVCpu->iem.s.uVex3rdReg = 0;
1055 pVCpu->iem.s.uVexLength = 0;
1056 pVCpu->iem.s.fEvexStuff = 0;
1057 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1058#ifdef IEM_WITH_CODE_TLB
1059 pVCpu->iem.s.pbInstrBuf = NULL;
1060 pVCpu->iem.s.offInstrNextByte = 0;
1061 pVCpu->iem.s.offCurInstrStart = 0;
1062# ifdef VBOX_STRICT
1063 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1064 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1065 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1066# endif
1067#else
1068 pVCpu->iem.s.offOpcode = 0;
1069 pVCpu->iem.s.cbOpcode = 0;
1070#endif
1071 pVCpu->iem.s.cActiveMappings = 0;
1072 pVCpu->iem.s.iNextMapping = 0;
1073 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1074 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1076 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1077 && pCtx->cs.u64Base == 0
1078 && pCtx->cs.u32Limit == UINT32_MAX
1079 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1080 if (!pVCpu->iem.s.fInPatchCode)
1081 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1082#endif
1083
1084#ifdef DBGFTRACE_ENABLED
1085 switch (enmMode)
1086 {
1087 case IEMMODE_64BIT:
1088 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1089 break;
1090 case IEMMODE_32BIT:
1091 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1092 break;
1093 case IEMMODE_16BIT:
1094 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1095 break;
1096 }
1097#endif
1098}
1099
1100
1101/**
1102 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1103 *
1104 * This is mostly a copy of iemInitDecoder.
1105 *
1106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1107 */
1108DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1109{
1110 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1111
1112 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1113
1114#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1123#endif
1124
1125 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1126#ifdef IEM_VERIFICATION_MODE_FULL
1127 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1128 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1129#endif
1130 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1131 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1132 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1133 pVCpu->iem.s.enmEffAddrMode = enmMode;
1134 if (enmMode != IEMMODE_64BIT)
1135 {
1136 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1137 pVCpu->iem.s.enmEffOpSize = enmMode;
1138 }
1139 else
1140 {
1141 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1142 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1143 }
1144 pVCpu->iem.s.fPrefixes = 0;
1145 pVCpu->iem.s.uRexReg = 0;
1146 pVCpu->iem.s.uRexB = 0;
1147 pVCpu->iem.s.uRexIndex = 0;
1148 pVCpu->iem.s.idxPrefix = 0;
1149 pVCpu->iem.s.uVex3rdReg = 0;
1150 pVCpu->iem.s.uVexLength = 0;
1151 pVCpu->iem.s.fEvexStuff = 0;
1152 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1153#ifdef IEM_WITH_CODE_TLB
1154 if (pVCpu->iem.s.pbInstrBuf)
1155 {
1156 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1157 - pVCpu->iem.s.uInstrBufPc;
1158 if (off < pVCpu->iem.s.cbInstrBufTotal)
1159 {
1160 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1161 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1162 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1163 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1164 else
1165 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1166 }
1167 else
1168 {
1169 pVCpu->iem.s.pbInstrBuf = NULL;
1170 pVCpu->iem.s.offInstrNextByte = 0;
1171 pVCpu->iem.s.offCurInstrStart = 0;
1172 pVCpu->iem.s.cbInstrBuf = 0;
1173 pVCpu->iem.s.cbInstrBufTotal = 0;
1174 }
1175 }
1176 else
1177 {
1178 pVCpu->iem.s.offInstrNextByte = 0;
1179 pVCpu->iem.s.offCurInstrStart = 0;
1180 pVCpu->iem.s.cbInstrBuf = 0;
1181 pVCpu->iem.s.cbInstrBufTotal = 0;
1182 }
1183#else
1184 pVCpu->iem.s.cbOpcode = 0;
1185 pVCpu->iem.s.offOpcode = 0;
1186#endif
1187 Assert(pVCpu->iem.s.cActiveMappings == 0);
1188 pVCpu->iem.s.iNextMapping = 0;
1189 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1190 Assert(pVCpu->iem.s.fBypassHandlers == false);
1191#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1192 if (!pVCpu->iem.s.fInPatchCode)
1193 { /* likely */ }
1194 else
1195 {
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202 }
1203#endif
1204
1205#ifdef DBGFTRACE_ENABLED
1206 switch (enmMode)
1207 {
1208 case IEMMODE_64BIT:
1209 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1210 break;
1211 case IEMMODE_32BIT:
1212 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1213 break;
1214 case IEMMODE_16BIT:
1215 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1216 break;
1217 }
1218#endif
1219}
1220
1221
1222
1223/**
1224 * Prefetches opcodes the first time, i.e. when starting execution.
1225 *
1226 * @returns Strict VBox status code.
1227 * @param pVCpu The cross context virtual CPU structure of the
1228 * calling thread.
1229 * @param fBypassHandlers Whether to bypass access handlers.
1230 */
1231IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1232{
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1235#endif
1236 iemInitDecoder(pVCpu, fBypassHandlers);
1237
1238#ifdef IEM_WITH_CODE_TLB
1239 /** @todo Do ITLB lookup here. */
1240
1241#else /* !IEM_WITH_CODE_TLB */
1242
1243 /*
1244 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1245 *
1246 * First translate CS:rIP to a physical address.
1247 */
1248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1249 uint32_t cbToTryRead;
1250 RTGCPTR GCPtrPC;
1251 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1252 {
1253 cbToTryRead = PAGE_SIZE;
1254 GCPtrPC = pCtx->rip;
1255 if (IEM_IS_CANONICAL(GCPtrPC))
1256 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1257 else
1258 return iemRaiseGeneralProtectionFault0(pVCpu);
1259 }
1260 else
1261 {
1262 uint32_t GCPtrPC32 = pCtx->eip;
1263 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1264 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1265 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1266 else
1267 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1268 if (cbToTryRead) { /* likely */ }
1269 else /* overflowed */
1270 {
1271 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1272 cbToTryRead = UINT32_MAX;
1273 }
1274 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1275 Assert(GCPtrPC <= UINT32_MAX);
1276 }
1277
1278# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1279 /* Allow interpretation of patch manager code blocks since they can for
1280 instance throw #PFs for perfectly good reasons. */
1281 if (pVCpu->iem.s.fInPatchCode)
1282 {
1283 size_t cbRead = 0;
1284 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1285 AssertRCReturn(rc, rc);
1286 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1287 return VINF_SUCCESS;
1288 }
1289# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1290
1291 RTGCPHYS GCPhys;
1292 uint64_t fFlags;
1293 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1294 if (RT_SUCCESS(rc)) { /* probable */ }
1295 else
1296 {
1297 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1298 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1299 }
1300 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1301 else
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1304 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1305 }
1306 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1307 else
1308 {
1309 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1310 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1311 }
1312 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1313 /** @todo Check reserved bits and such stuff. PGM is better at doing
1314 * that, so do it when implementing the guest virtual address
1315 * TLB... */
1316
1317# ifdef IEM_VERIFICATION_MODE_FULL
1318 /*
1319 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1320 * instruction.
1321 */
1322 /** @todo optimize this differently by not using PGMPhysRead. */
1323 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1324 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1325 if ( offPrevOpcodes < cbOldOpcodes
1326 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1327 {
1328 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1329 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1330 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1331 pVCpu->iem.s.cbOpcode = cbNew;
1332 return VINF_SUCCESS;
1333 }
1334# endif
1335
1336 /*
1337 * Read the bytes at this address.
1338 */
1339 PVM pVM = pVCpu->CTX_SUFF(pVM);
1340# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1341 size_t cbActual;
1342 if ( PATMIsEnabled(pVM)
1343 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1344 {
1345 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1346 Assert(cbActual > 0);
1347 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1348 }
1349 else
1350# endif
1351 {
1352 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1353 if (cbToTryRead > cbLeftOnPage)
1354 cbToTryRead = cbLeftOnPage;
1355 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1356 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1357
1358 if (!pVCpu->iem.s.fBypassHandlers)
1359 {
1360 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1361 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1362 { /* likely */ }
1363 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1364 {
1365 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1366 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1367 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1368 }
1369 else
1370 {
1371 Log((RT_SUCCESS(rcStrict)
1372 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1373 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1374 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1375 return rcStrict;
1376 }
1377 }
1378 else
1379 {
1380 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1381 if (RT_SUCCESS(rc))
1382 { /* likely */ }
1383 else
1384 {
1385 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1386 GCPtrPC, GCPhys, rc, cbToTryRead));
1387 return rc;
1388 }
1389 }
1390 pVCpu->iem.s.cbOpcode = cbToTryRead;
1391 }
1392#endif /* !IEM_WITH_CODE_TLB */
1393 return VINF_SUCCESS;
1394}
1395
1396
1397/**
1398 * Invalidates the IEM TLBs.
1399 *
1400 * This is called internally as well as by PGM when moving GC mappings.
1401 *
1403 * @param pVCpu The cross context virtual CPU structure of the calling
1404 * thread.
1405 * @param fVmm Set when PGM calls us with a remapping.
1406 */
1407VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1408{
1409#ifdef IEM_WITH_CODE_TLB
1410 pVCpu->iem.s.cbInstrBufTotal = 0;
1411 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1412 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1413 { /* very likely */ }
1414 else
1415 {
1416 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1417 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1418 while (i-- > 0)
1419 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1420 }
1421#endif
1422
1423#ifdef IEM_WITH_DATA_TLB
1424 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1425 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1426 { /* very likely */ }
1427 else
1428 {
1429 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1430 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1431 while (i-- > 0)
1432 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1433 }
1434#endif
1435 NOREF(pVCpu); NOREF(fVmm);
1436}
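/*
 * Illustrative sketch (disabled, not built): why bumping uTlbRevision above
 * invalidates every entry.  Lookups tag the page address with the current
 * revision, so entries stamped with an older revision can never match again.
 * The lookup below is simplified; the field names mirror those used above.
 */
#if 0
    uintptr_t const idx  = (uint8_t)(GCPtr >> X86_PAGE_SHIFT);
    uint64_t  const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
    bool      const fHit = pVCpu->iem.s.DataTlb.aEntries[idx].uTag == uTag; /* stale-revision entries always miss */
#endif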
1437
1438
1439/**
1440 * Invalidates a page in the TLBs.
1441 *
1442 * @param pVCpu The cross context virtual CPU structure of the calling
1443 * thread.
1444 * @param GCPtr The address of the page to invalidate
1445 */
1446VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1447{
1448#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1449 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1450 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1451 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1452 uintptr_t idx = (uint8_t)GCPtr;
1453
1454# ifdef IEM_WITH_CODE_TLB
1455 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1456 {
1457 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1458 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1459 pVCpu->iem.s.cbInstrBufTotal = 0;
1460 }
1461# endif
1462
1463# ifdef IEM_WITH_DATA_TLB
1464 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1465 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1466# endif
1467#else
1468 NOREF(pVCpu); NOREF(GCPtr);
1469#endif
1470}
1471
1472
1473/**
1474 * Invalidates the host physical aspects of the IEM TLBs.
1475 *
1476 * This is called internally as well as by PGM when moving GC mappings.
1477 *
1478 * @param pVCpu The cross context virtual CPU structure of the calling
1479 * thread.
1480 */
1481VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1482{
1483#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1484 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1485
1486# ifdef IEM_WITH_CODE_TLB
1487 pVCpu->iem.s.cbInstrBufTotal = 0;
1488# endif
1489 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1490 if (uTlbPhysRev != 0)
1491 {
1492 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1493 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1494 }
1495 else
1496 {
1497 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1498 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1499
1500 unsigned i;
1501# ifdef IEM_WITH_CODE_TLB
1502 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1503 while (i-- > 0)
1504 {
1505 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1506 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1507 }
1508# endif
1509# ifdef IEM_WITH_DATA_TLB
1510 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1511 while (i-- > 0)
1512 {
1513 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1514 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1515 }
1516# endif
1517 }
1518#else
1519 NOREF(pVCpu);
1520#endif
1521}
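/*
 * Illustrative note on the physical revision: the IEMTLBE_F_PHYS_REV bits in
 * fFlagsAndPhysRev act as a timestamp.  An entry's physical info (ring-3
 * mapping, read/write restrictions) is only trusted while those bits equal the
 * TLB's current uTlbPhysRev, which is how the function above invalidates all
 * physical aspects in O(1).  Sketch of the check, mirroring the code TLB fetch
 * path further down (fPhysUpToDate and pTlbe are hypothetical locals):
 */
#if 0 /* illustration only */
    bool const fPhysUpToDate = (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV)
                            == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
#endif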
1522
1523
1524/**
1525 * Invalidates the host physical aspects of the IEM TLBs.
1526 *
1527 * This is called internally as well as by PGM when moving GC mappings.
1528 *
1529 * @param pVM The cross context VM structure.
1530 *
1531 * @remarks Caller holds the PGM lock.
1532 */
1533VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1534{
1535 RT_NOREF_PV(pVM);
1536}
1537
1538#ifdef IEM_WITH_CODE_TLB
1539
1540/**
1541 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1542 * failure and longjmp'ing.
1543 *
1544 * We end up here for a number of reasons:
1545 * - pbInstrBuf isn't yet initialized.
1546 * - Advancing beyond the buffer boundary (e.g. cross page).
1547 * - Advancing beyond the CS segment limit.
1548 * - Fetching from non-mappable page (e.g. MMIO).
1549 *
1550 * @param pVCpu The cross context virtual CPU structure of the
1551 * calling thread.
1552 * @param pvDst Where to return the bytes.
1553 * @param cbDst Number of bytes to read.
1554 *
1555 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1556 */
1557IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1558{
1559#ifdef IN_RING3
1560//__debugbreak();
1561 for (;;)
1562 {
1563 Assert(cbDst <= 8);
1564 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1565
1566 /*
1567 * We might have a partial buffer match, deal with that first to make the
1568 * rest simpler. This is the first part of the cross page/buffer case.
1569 */
1570 if (pVCpu->iem.s.pbInstrBuf != NULL)
1571 {
1572 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1573 {
1574 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1575 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1576 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1577
1578 cbDst -= cbCopy;
1579 pvDst = (uint8_t *)pvDst + cbCopy;
1580 offBuf += cbCopy;
1581 pVCpu->iem.s.offInstrNextByte = offBuf; /* keep the member in sync with the advanced local offset */
1582 }
1583 }
1584
1585 /*
1586 * Check segment limit, figuring how much we're allowed to access at this point.
1587 *
1588 * We will fault immediately if RIP is past the segment limit / in non-canonical
1589 * territory. If we do continue, there are one or more bytes to read before we
1590 * end up in trouble, and we need to read those bytes before faulting.
1591 */
1592 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1593 RTGCPTR GCPtrFirst;
1594 uint32_t cbMaxRead;
1595 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1596 {
1597 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1598 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1599 { /* likely */ }
1600 else
1601 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1602 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1603 }
1604 else
1605 {
1606 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1607 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1608 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1609 { /* likely */ }
1610 else
1611 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1612 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1613 if (cbMaxRead != 0)
1614 { /* likely */ }
1615 else
1616 {
1617 /* Overflowed because address is 0 and limit is max. */
1618 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1619 cbMaxRead = X86_PAGE_SIZE;
1620 }
1621 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1622 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1623 if (cbMaxRead2 < cbMaxRead)
1624 cbMaxRead = cbMaxRead2;
1625 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1626 }
1627
1628 /*
1629 * Get the TLB entry for this piece of code.
1630 */
1631 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1632 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1633 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1634 if (pTlbe->uTag == uTag)
1635 {
1636 /* likely when executing lots of code, otherwise unlikely */
1637# ifdef VBOX_WITH_STATISTICS
1638 pVCpu->iem.s.CodeTlb.cTlbHits++;
1639# endif
1640 }
1641 else
1642 {
1643 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1644# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1645 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1646 {
1647 pTlbe->uTag = uTag;
1648 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1649 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1650 pTlbe->GCPhys = NIL_RTGCPHYS;
1651 pTlbe->pbMappingR3 = NULL;
1652 }
1653 else
1654# endif
1655 {
1656 RTGCPHYS GCPhys;
1657 uint64_t fFlags;
1658 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1659 if (RT_FAILURE(rc))
1660 {
1661 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1662 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1663 }
1664
1665 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1666 pTlbe->uTag = uTag;
1667 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1668 pTlbe->GCPhys = GCPhys;
1669 pTlbe->pbMappingR3 = NULL;
1670 }
1671 }
1672
1673 /*
1674 * Check TLB page table level access flags.
1675 */
1676 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1677 {
1678 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1679 {
1680 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1681 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1682 }
1683 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1684 {
1685 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1686 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1687 }
1688 }
1689
1690# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1691 /*
1692 * Allow interpretation of patch manager code blocks since they can for
1693 * instance throw #PFs for perfectly good reasons.
1694 */
1695 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1696 { /* likely */ }
1697 else
1698 {
1699 /** @todo This could be optimized a little in ring-3 if we liked. */
1700 size_t cbRead = 0;
1701 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1702 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1703 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1704 return;
1705 }
1706# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1707
1708 /*
1709 * Look up the physical page info if necessary.
1710 */
1711 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1712 { /* not necessary */ }
1713 else
1714 {
1715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1716 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1717 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1721 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1722 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1723 }
1724
1725# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1726 /*
1727 * Try do a direct read using the pbMappingR3 pointer.
1728 */
1729 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1730 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1731 {
1732 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1733 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1734 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1735 {
1736 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1737 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1738 }
1739 else
1740 {
1741 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1742 Assert(cbInstr < cbMaxRead);
1743 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1744 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1745 }
1746 if (cbDst <= cbMaxRead)
1747 {
1748 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1749 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1750 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1751 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1752 return;
1753 }
1754 pVCpu->iem.s.pbInstrBuf = NULL;
1755
1756 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1757 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1758 }
1759 else
1760# endif
1761#if 0
1762 /*
1763 * If there is no special read handling, we can read a bit more and
1764 * put it in the prefetch buffer.
1765 */
1766 if ( cbDst < cbMaxRead
1767 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1768 {
1769 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1770 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1772 { /* likely */ }
1773 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1776 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1777 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1778 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1779 }
1780 else
1781 {
1782 Log((RT_SUCCESS(rcStrict)
1783 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1784 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1785 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1786 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1787 }
1788 }
1789 /*
1790 * Special read handling, so only read exactly what's needed.
1791 * This is a highly unlikely scenario.
1792 */
1793 else
1794#endif
1795 {
1796 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1797 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1798 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1799 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1801 { /* likely */ }
1802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1803 {
1804 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1805 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1806 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1807 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1808 }
1809 else
1810 {
1811 Log((RT_SUCCESS(rcStrict)
1812 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1813 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1814 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1815 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1816 }
1817 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1818 if (cbToRead == cbDst)
1819 return;
1820 }
1821
1822 /*
1823 * More to read, loop.
1824 */
1825 cbDst -= cbMaxRead;
1826 pvDst = (uint8_t *)pvDst + cbMaxRead;
1827 }
1828#else
1829 RT_NOREF(pvDst, cbDst);
1830 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1831#endif
1832}
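/*
 * Illustrative sketch of the read clamping performed above: the fetcher never
 * reads past the CS limit nor past the current guest page, so cbMaxRead is the
 * smaller of "bytes left before the limit" and "bytes left on the page".  The
 * helper below is hypothetical and restates the 16/32-bit branch in isolation
 * (it assumes the limit check itself has already passed).
 */
#if 0 /* illustration only */
static uint32_t iemSketchCalcMaxInsnRead(uint32_t offSeg, uint32_t uSegLimit, uint64_t uSegBase)
{
    uint32_t cbMaxRead = uSegLimit - offSeg + 1;                       /* bytes left before the limit */
    if (!cbMaxRead)                                                    /* wrapped: offSeg == 0 && limit == 4G-1 */
        cbMaxRead = X86_PAGE_SIZE;
    uint32_t const GCPtrFirst  = offSeg + (uint32_t)uSegBase;          /* flat address of the next byte */
    uint32_t const cbLeftOnPg  = X86_PAGE_SIZE - (GCPtrFirst & X86_PAGE_OFFSET_MASK);
    return RT_MIN(cbMaxRead, cbLeftOnPg);
}
#endif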
1833
1834#else
1835
1836/**
1837 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1838 * exception if it fails.
1839 *
1840 * @returns Strict VBox status code.
1841 * @param pVCpu The cross context virtual CPU structure of the
1842 * calling thread.
1843 * @param cbMin The minimum number of bytes relative to offOpcode
1844 * that must be read.
1845 */
1846IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1847{
1848 /*
1849 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1850 *
1851 * First translate CS:rIP to a physical address.
1852 */
1853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1854 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1855 uint32_t cbToTryRead;
1856 RTGCPTR GCPtrNext;
1857 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1858 {
1859 cbToTryRead = PAGE_SIZE;
1860 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1861 if (!IEM_IS_CANONICAL(GCPtrNext))
1862 return iemRaiseGeneralProtectionFault0(pVCpu);
1863 }
1864 else
1865 {
1866 uint32_t GCPtrNext32 = pCtx->eip;
1867 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1868 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1869 if (GCPtrNext32 > pCtx->cs.u32Limit)
1870 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1871 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1872 if (!cbToTryRead) /* overflowed */
1873 {
1874 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1875 cbToTryRead = UINT32_MAX;
1876 /** @todo check out wrapping around the code segment. */
1877 }
1878 if (cbToTryRead < cbMin - cbLeft)
1879 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1880 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1881 }
1882
1883 /* Only read up to the end of the page, and make sure we don't read more
1884 than the opcode buffer can hold. */
1885 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1886 if (cbToTryRead > cbLeftOnPage)
1887 cbToTryRead = cbLeftOnPage;
1888 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1889 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1890/** @todo r=bird: Convert assertion into undefined opcode exception? */
1891 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1892
1893# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1894 /* Allow interpretation of patch manager code blocks since they can for
1895 instance throw #PFs for perfectly good reasons. */
1896 if (pVCpu->iem.s.fInPatchCode)
1897 {
1898 size_t cbRead = 0;
1899 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1900 AssertRCReturn(rc, rc);
1901 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1902 return VINF_SUCCESS;
1903 }
1904# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1905
1906 RTGCPHYS GCPhys;
1907 uint64_t fFlags;
1908 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1909 if (RT_FAILURE(rc))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1912 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1913 }
1914 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1915 {
1916 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1917 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1918 }
1919 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1920 {
1921 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1922 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1923 }
1924 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1925 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1926 /** @todo Check reserved bits and such stuff. PGM is better at doing
1927 * that, so do it when implementing the guest virtual address
1928 * TLB... */
1929
1930 /*
1931 * Read the bytes at this address.
1932 *
1933 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1934 * and since PATM should only patch the start of an instruction there
1935 * should be no need to check again here.
1936 */
1937 if (!pVCpu->iem.s.fBypassHandlers)
1938 {
1939 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1940 cbToTryRead, PGMACCESSORIGIN_IEM);
1941 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1942 { /* likely */ }
1943 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1944 {
1945 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1946 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1947 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1948 }
1949 else
1950 {
1951 Log((RT_SUCCESS(rcStrict)
1952 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1953 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1954 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1955 return rcStrict;
1956 }
1957 }
1958 else
1959 {
1960 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1961 if (RT_SUCCESS(rc))
1962 { /* likely */ }
1963 else
1964 {
1965 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1966 return rc;
1967 }
1968 }
1969 pVCpu->iem.s.cbOpcode += cbToTryRead;
1970 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1971
1972 return VINF_SUCCESS;
1973}
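/*
 * Illustrative note on the buffering contract in the non-TLB case: prefetched
 * bytes accumulate in abOpcode[0..cbOpcode-1], the decoder consumes them via
 * offOpcode, and this function is only called when offOpcode + cbMin would run
 * past cbOpcode.  On success it merely appends bytes, so the following always
 * holds (a sketch of the invariant, not code from the original source):
 */
#if 0 /* illustration only */
    Assert(pVCpu->iem.s.offOpcode <= pVCpu->iem.s.cbOpcode);
    Assert(pVCpu->iem.s.cbOpcode  <= sizeof(pVCpu->iem.s.abOpcode));
#endif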
1974
1975#endif /* !IEM_WITH_CODE_TLB */
1976#ifndef IEM_WITH_SETJMP
1977
1978/**
1979 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1980 *
1981 * @returns Strict VBox status code.
1982 * @param pVCpu The cross context virtual CPU structure of the
1983 * calling thread.
1984 * @param pb Where to return the opcode byte.
1985 */
1986DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1987{
1988 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1989 if (rcStrict == VINF_SUCCESS)
1990 {
1991 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1992 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1993 pVCpu->iem.s.offOpcode = offOpcode + 1;
1994 }
1995 else
1996 *pb = 0;
1997 return rcStrict;
1998}
1999
2000
2001/**
2002 * Fetches the next opcode byte.
2003 *
2004 * @returns Strict VBox status code.
2005 * @param pVCpu The cross context virtual CPU structure of the
2006 * calling thread.
2007 * @param pu8 Where to return the opcode byte.
2008 */
2009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2010{
2011 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2012 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2013 {
2014 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2015 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2016 return VINF_SUCCESS;
2017 }
2018 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2019}
2020
2021#else /* IEM_WITH_SETJMP */
2022
2023/**
2024 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2025 *
2026 * @returns The opcode byte.
2027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2028 */
2029DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2030{
2031# ifdef IEM_WITH_CODE_TLB
2032 uint8_t u8;
2033 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2034 return u8;
2035# else
2036 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2037 if (rcStrict == VINF_SUCCESS)
2038 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2039 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2040# endif
2041}
2042
2043
2044/**
2045 * Fetches the next opcode byte, longjmp on error.
2046 *
2047 * @returns The opcode byte.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 */
2050DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2051{
2052# ifdef IEM_WITH_CODE_TLB
2053 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2054 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2055 if (RT_LIKELY( pbBuf != NULL
2056 && offBuf < pVCpu->iem.s.cbInstrBuf))
2057 {
2058 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2059 return pbBuf[offBuf];
2060 }
2061# else
2062 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2063 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2064 {
2065 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2066 return pVCpu->iem.s.abOpcode[offOpcode];
2067 }
2068# endif
2069 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2070}
2071
2072#endif /* IEM_WITH_SETJMP */
2073
2074/**
2075 * Fetches the next opcode byte, returns automatically on failure.
2076 *
2077 * @param a_pu8 Where to return the opcode byte.
2078 * @remark Implicitly references pVCpu.
2079 */
2080#ifndef IEM_WITH_SETJMP
2081# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2082 do \
2083 { \
2084 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2085 if (rcStrict2 == VINF_SUCCESS) \
2086 { /* likely */ } \
2087 else \
2088 return rcStrict2; \
2089 } while (0)
2090#else
2091# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2092#endif /* IEM_WITH_SETJMP */
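/*
 * Minimal usage sketch for the macro above: a decoder routine fetches the next
 * opcode byte and, in the non-setjmp build, implicitly propagates any strict
 * status code to its caller; in the setjmp build the same line longjmps on
 * failure.  The function name below is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemOpSketchDecodeOneByte(PVMCPU pVCpu)
{
    uint8_t bOpcode;
    IEM_OPCODE_GET_NEXT_U8(&bOpcode);   /* returns/longjmps if the fetch fails */
    /* ... dispatch on bOpcode ... */
    return VINF_SUCCESS;
}
#endif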
2093
2094
2095#ifndef IEM_WITH_SETJMP
2096/**
2097 * Fetches the next signed byte from the opcode stream.
2098 *
2099 * @returns Strict VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 * @param pi8 Where to return the signed byte.
2102 */
2103DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2104{
2105 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2106}
2107#endif /* !IEM_WITH_SETJMP */
2108
2109
2110/**
2111 * Fetches the next signed byte from the opcode stream, returning automatically
2112 * on failure.
2113 *
2114 * @param a_pi8 Where to return the signed byte.
2115 * @remark Implicitly references pVCpu.
2116 */
2117#ifndef IEM_WITH_SETJMP
2118# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2119 do \
2120 { \
2121 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2122 if (rcStrict2 != VINF_SUCCESS) \
2123 return rcStrict2; \
2124 } while (0)
2125#else /* IEM_WITH_SETJMP */
2126# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2127
2128#endif /* IEM_WITH_SETJMP */
2129
2130#ifndef IEM_WITH_SETJMP
2131
2132/**
2133 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2137 * @param pu16 Where to return the word, sign extended from the opcode byte.
2138 */
2139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2140{
2141 uint8_t u8;
2142 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2143 if (rcStrict == VINF_SUCCESS)
2144 *pu16 = (int8_t)u8;
2145 return rcStrict;
2146}
2147
2148
2149/**
2150 * Fetches the next signed byte from the opcode stream, extending it to
2151 * unsigned 16-bit.
2152 *
2153 * @returns Strict VBox status code.
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pu16 Where to return the unsigned word.
2156 */
2157DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2158{
2159 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2160 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2161 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2162
2163 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2164 pVCpu->iem.s.offOpcode = offOpcode + 1;
2165 return VINF_SUCCESS;
2166}
2167
2168#endif /* !IEM_WITH_SETJMP */
2169
2170/**
2171 * Fetches the next signed byte from the opcode stream, sign-extending it to
2172 * a word and returning automatically on failure.
2173 *
2174 * @param a_pu16 Where to return the word.
2175 * @remark Implicitly references pVCpu.
2176 */
2177#ifndef IEM_WITH_SETJMP
2178# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2179 do \
2180 { \
2181 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2182 if (rcStrict2 != VINF_SUCCESS) \
2183 return rcStrict2; \
2184 } while (0)
2185#else
2186# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2187#endif
2188
2189#ifndef IEM_WITH_SETJMP
2190
2191/**
2192 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2193 *
2194 * @returns Strict VBox status code.
2195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2196 * @param pu32 Where to return the opcode dword.
2197 */
2198DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2199{
2200 uint8_t u8;
2201 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2202 if (rcStrict == VINF_SUCCESS)
2203 *pu32 = (int8_t)u8;
2204 return rcStrict;
2205}
2206
2207
2208/**
2209 * Fetches the next signed byte from the opcode stream, extending it to
2210 * unsigned 32-bit.
2211 *
2212 * @returns Strict VBox status code.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 * @param pu32 Where to return the unsigned dword.
2215 */
2216DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2217{
2218 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2219 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2220 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2221
2222 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2223 pVCpu->iem.s.offOpcode = offOpcode + 1;
2224 return VINF_SUCCESS;
2225}
2226
2227#endif /* !IEM_WITH_SETJMP */
2228
2229/**
2230 * Fetches the next signed byte from the opcode stream and sign-extending it to
2231 * a word, returning automatically on failure.
2232 *
2233 * @param a_pu32 Where to return the word.
2234 * @remark Implicitly references pVCpu.
2235 */
2236#ifndef IEM_WITH_SETJMP
2237#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2238 do \
2239 { \
2240 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2241 if (rcStrict2 != VINF_SUCCESS) \
2242 return rcStrict2; \
2243 } while (0)
2244#else
2245# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2246#endif
2247
2248#ifndef IEM_WITH_SETJMP
2249
2250/**
2251 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2252 *
2253 * @returns Strict VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2255 * @param pu64 Where to return the opcode qword.
2256 */
2257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2258{
2259 uint8_t u8;
2260 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2261 if (rcStrict == VINF_SUCCESS)
2262 *pu64 = (int8_t)u8;
2263 return rcStrict;
2264}
2265
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, extending it to
2269 * unsigned 64-bit.
2270 *
2271 * @returns Strict VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2273 * @param pu64 Where to return the unsigned qword.
2274 */
2275DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2276{
2277 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2278 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2279 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2280
2281 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2282 pVCpu->iem.s.offOpcode = offOpcode + 1;
2283 return VINF_SUCCESS;
2284}
2285
2286#endif /* !IEM_WITH_SETJMP */
2287
2288
2289/**
2290 * Fetches the next signed byte from the opcode stream, sign-extending it to
2291 * a quad word and returning automatically on failure.
2292 *
2293 * @param a_pu64 Where to return the quad word.
2294 * @remark Implicitly references pVCpu.
2295 */
2296#ifndef IEM_WITH_SETJMP
2297# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2298 do \
2299 { \
2300 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2301 if (rcStrict2 != VINF_SUCCESS) \
2302 return rcStrict2; \
2303 } while (0)
2304#else
2305# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2306#endif
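/*
 * Illustrative note on the S8_SX_* family above: the (int8_t) cast performs
 * the sign extension and the implicit conversion to the wider unsigned type
 * preserves the extended bit pattern, e.g. an opcode byte 0xFE (-2) becomes
 * 0xFFFE, 0xFFFFFFFE or 0xFFFFFFFFFFFFFFFE depending on the target width:
 */
#if 0 /* illustration only */
    uint8_t  const u8 = 0xfe;           /* -2 as a signed byte */
    uint64_t       u64;
    u64 = (int8_t)u8;                   /* the same conversion the macros above rely on */
    Assert(u64 == UINT64_C(0xfffffffffffffffe));
#endif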
2307
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu16 Where to return the opcode word.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2319{
2320 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2321 if (rcStrict == VINF_SUCCESS)
2322 {
2323 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2324# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2325 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2326# else
2327 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2328# endif
2329 pVCpu->iem.s.offOpcode = offOpcode + 2;
2330 }
2331 else
2332 *pu16 = 0;
2333 return rcStrict;
2334}
2335
2336
2337/**
2338 * Fetches the next opcode word.
2339 *
2340 * @returns Strict VBox status code.
2341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2342 * @param pu16 Where to return the opcode word.
2343 */
2344DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2345{
2346 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2347 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2348 {
2349 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2350# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2351 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2352# else
2353 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2354# endif
2355 return VINF_SUCCESS;
2356 }
2357 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2358}
2359
2360#else /* IEM_WITH_SETJMP */
2361
2362/**
2363 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2364 *
2365 * @returns The opcode word.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 */
2368DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2369{
2370# ifdef IEM_WITH_CODE_TLB
2371 uint16_t u16;
2372 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2373 return u16;
2374# else
2375 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2376 if (rcStrict == VINF_SUCCESS)
2377 {
2378 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2379 pVCpu->iem.s.offOpcode += 2;
2380# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2381 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2382# else
2383 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2384# endif
2385 }
2386 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2387# endif
2388}
2389
2390
2391/**
2392 * Fetches the next opcode word, longjmp on error.
2393 *
2394 * @returns The opcode word.
2395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2396 */
2397DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2398{
2399# ifdef IEM_WITH_CODE_TLB
2400 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2401 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2402 if (RT_LIKELY( pbBuf != NULL
2403 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2404 {
2405 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2406# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2407 return *(uint16_t const *)&pbBuf[offBuf];
2408# else
2409 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2410# endif
2411 }
2412# else
2413 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2414 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2415 {
2416 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2419# else
2420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2421# endif
2422 }
2423# endif
2424 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2425}
2426
2427#endif /* IEM_WITH_SETJMP */
2428
2429
2430/**
2431 * Fetches the next opcode word, returns automatically on failure.
2432 *
2433 * @param a_pu16 Where to return the opcode word.
2434 * @remark Implicitly references pVCpu.
2435 */
2436#ifndef IEM_WITH_SETJMP
2437# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2438 do \
2439 { \
2440 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2441 if (rcStrict2 != VINF_SUCCESS) \
2442 return rcStrict2; \
2443 } while (0)
2444#else
2445# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2446#endif
2447
2448#ifndef IEM_WITH_SETJMP
2449
2450/**
2451 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2452 *
2453 * @returns Strict VBox status code.
2454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2455 * @param pu32 Where to return the opcode double word.
2456 */
2457DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2458{
2459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2460 if (rcStrict == VINF_SUCCESS)
2461 {
2462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2463 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2464 pVCpu->iem.s.offOpcode = offOpcode + 2;
2465 }
2466 else
2467 *pu32 = 0;
2468 return rcStrict;
2469}
2470
2471
2472/**
2473 * Fetches the next opcode word, zero extending it to a double word.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2477 * @param pu32 Where to return the opcode double word.
2478 */
2479DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2480{
2481 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2482 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2483 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2484
2485 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2486 pVCpu->iem.s.offOpcode = offOpcode + 2;
2487 return VINF_SUCCESS;
2488}
2489
2490#endif /* !IEM_WITH_SETJMP */
2491
2492
2493/**
2494 * Fetches the next opcode word and zero extends it to a double word, returns
2495 * automatically on failure.
2496 *
2497 * @param a_pu32 Where to return the opcode double word.
2498 * @remark Implicitly references pVCpu.
2499 */
2500#ifndef IEM_WITH_SETJMP
2501# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2502 do \
2503 { \
2504 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2505 if (rcStrict2 != VINF_SUCCESS) \
2506 return rcStrict2; \
2507 } while (0)
2508#else
2509# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2510#endif
2511
2512#ifndef IEM_WITH_SETJMP
2513
2514/**
2515 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2516 *
2517 * @returns Strict VBox status code.
2518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2519 * @param pu64 Where to return the opcode quad word.
2520 */
2521DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2522{
2523 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2524 if (rcStrict == VINF_SUCCESS)
2525 {
2526 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2527 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528 pVCpu->iem.s.offOpcode = offOpcode + 2;
2529 }
2530 else
2531 *pu64 = 0;
2532 return rcStrict;
2533}
2534
2535
2536/**
2537 * Fetches the next opcode word, zero extending it to a quad word.
2538 *
2539 * @returns Strict VBox status code.
2540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2541 * @param pu64 Where to return the opcode quad word.
2542 */
2543DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2544{
2545 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2546 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2547 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2548
2549 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2550 pVCpu->iem.s.offOpcode = offOpcode + 2;
2551 return VINF_SUCCESS;
2552}
2553
2554#endif /* !IEM_WITH_SETJMP */
2555
2556/**
2557 * Fetches the next opcode word and zero extends it to a quad word, returns
2558 * automatically on failure.
2559 *
2560 * @param a_pu64 Where to return the opcode quad word.
2561 * @remark Implicitly references pVCpu.
2562 */
2563#ifndef IEM_WITH_SETJMP
2564# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2565 do \
2566 { \
2567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2568 if (rcStrict2 != VINF_SUCCESS) \
2569 return rcStrict2; \
2570 } while (0)
2571#else
2572# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2573#endif
2574
2575
2576#ifndef IEM_WITH_SETJMP
2577/**
2578 * Fetches the next signed word from the opcode stream.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pi16 Where to return the signed word.
2583 */
2584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2585{
2586 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2587}
2588#endif /* !IEM_WITH_SETJMP */
2589
2590
2591/**
2592 * Fetches the next signed word from the opcode stream, returning automatically
2593 * on failure.
2594 *
2595 * @param a_pi16 Where to return the signed word.
2596 * @remark Implicitly references pVCpu.
2597 */
2598#ifndef IEM_WITH_SETJMP
2599# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2600 do \
2601 { \
2602 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2603 if (rcStrict2 != VINF_SUCCESS) \
2604 return rcStrict2; \
2605 } while (0)
2606#else
2607# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2608#endif
2609
2610#ifndef IEM_WITH_SETJMP
2611
2612/**
2613 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2614 *
2615 * @returns Strict VBox status code.
2616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2617 * @param pu32 Where to return the opcode dword.
2618 */
2619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2620{
2621 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2622 if (rcStrict == VINF_SUCCESS)
2623 {
2624 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 pVCpu->iem.s.offOpcode = offOpcode + 4;
2634 }
2635 else
2636 *pu32 = 0;
2637 return rcStrict;
2638}
2639
2640
2641/**
2642 * Fetches the next opcode dword.
2643 *
2644 * @returns Strict VBox status code.
2645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2646 * @param pu32 Where to return the opcode double word.
2647 */
2648DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2649{
2650 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2651 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2652 {
2653 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2656# else
2657 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2658 pVCpu->iem.s.abOpcode[offOpcode + 1],
2659 pVCpu->iem.s.abOpcode[offOpcode + 2],
2660 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2661# endif
2662 return VINF_SUCCESS;
2663 }
2664 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2665}
2666
2667#else /* IEM_WITH_SETJMP */
2668
2669/**
2670 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2671 *
2672 * @returns The opcode dword.
2673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2674 */
2675DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2676{
2677# ifdef IEM_WITH_CODE_TLB
2678 uint32_t u32;
2679 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2680 return u32;
2681# else
2682 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2683 if (rcStrict == VINF_SUCCESS)
2684 {
2685 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2686 pVCpu->iem.s.offOpcode = offOpcode + 4;
2687# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2688 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2689# else
2690 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2691 pVCpu->iem.s.abOpcode[offOpcode + 1],
2692 pVCpu->iem.s.abOpcode[offOpcode + 2],
2693 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2694# endif
2695 }
2696 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2697# endif
2698}
2699
2700
2701/**
2702 * Fetches the next opcode dword, longjmp on error.
2703 *
2704 * @returns The opcode dword.
2705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2706 */
2707DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2708{
2709# ifdef IEM_WITH_CODE_TLB
2710 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2711 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2712 if (RT_LIKELY( pbBuf != NULL
2713 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2714 {
2715 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2716# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2717 return *(uint32_t const *)&pbBuf[offBuf];
2718# else
2719 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2720 pbBuf[offBuf + 1],
2721 pbBuf[offBuf + 2],
2722 pbBuf[offBuf + 3]);
2723# endif
2724 }
2725# else
2726 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2727 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2728 {
2729 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2730# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2731 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2732# else
2733 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2734 pVCpu->iem.s.abOpcode[offOpcode + 1],
2735 pVCpu->iem.s.abOpcode[offOpcode + 2],
2736 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2737# endif
2738 }
2739# endif
2740 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2741}
2742
2743#endif /* IEM_WITH_SETJMP */
2744
2745
2746/**
2747 * Fetches the next opcode dword, returns automatically on failure.
2748 *
2749 * @param a_pu32 Where to return the opcode dword.
2750 * @remark Implicitly references pVCpu.
2751 */
2752#ifndef IEM_WITH_SETJMP
2753# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2754 do \
2755 { \
2756 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2757 if (rcStrict2 != VINF_SUCCESS) \
2758 return rcStrict2; \
2759 } while (0)
2760#else
2761# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2762#endif
2763
2764#ifndef IEM_WITH_SETJMP
2765
2766/**
2767 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2768 *
2769 * @returns Strict VBox status code.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 * @param pu64 Where to return the opcode dword, zero extended to a qword.
2772 */
2773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2774{
2775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2776 if (rcStrict == VINF_SUCCESS)
2777 {
2778 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2779 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2780 pVCpu->iem.s.abOpcode[offOpcode + 1],
2781 pVCpu->iem.s.abOpcode[offOpcode + 2],
2782 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2783 pVCpu->iem.s.offOpcode = offOpcode + 4;
2784 }
2785 else
2786 *pu64 = 0;
2787 return rcStrict;
2788}
2789
2790
2791/**
2792 * Fetches the next opcode dword, zero extending it to a quad word.
2793 *
2794 * @returns Strict VBox status code.
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 * @param pu64 Where to return the opcode quad word.
2797 */
2798DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2799{
2800 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2801 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2802 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2803
2804 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2805 pVCpu->iem.s.abOpcode[offOpcode + 1],
2806 pVCpu->iem.s.abOpcode[offOpcode + 2],
2807 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2808 pVCpu->iem.s.offOpcode = offOpcode + 4;
2809 return VINF_SUCCESS;
2810}
2811
2812#endif /* !IEM_WITH_SETJMP */
2813
2814
2815/**
2816 * Fetches the next opcode dword and zero extends it to a quad word, returns
2817 * automatically on failure.
2818 *
2819 * @param a_pu64 Where to return the opcode quad word.
2820 * @remark Implicitly references pVCpu.
2821 */
2822#ifndef IEM_WITH_SETJMP
2823# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2824 do \
2825 { \
2826 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2827 if (rcStrict2 != VINF_SUCCESS) \
2828 return rcStrict2; \
2829 } while (0)
2830#else
2831# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2832#endif
2833
2834
2835#ifndef IEM_WITH_SETJMP
2836/**
2837 * Fetches the next signed double word from the opcode stream.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 * @param pi32 Where to return the signed double word.
2842 */
2843DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2844{
2845 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2846}
2847#endif
2848
2849/**
2850 * Fetches the next signed double word from the opcode stream, returning
2851 * automatically on failure.
2852 *
2853 * @param a_pi32 Where to return the signed double word.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param pu64 Where to return the opcode qword.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, sign extending it into a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2907
2908 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 *pu64 = i32;
2913 pVCpu->iem.s.offOpcode = offOpcode + 4;
2914 return VINF_SUCCESS;
2915}
2916
2917#endif /* !IEM_WITH_SETJMP */
2918
2919
2920/**
2921 * Fetches the next opcode double word and sign extends it to a quad word,
2922 * returns automatically on failure.
2923 *
2924 * @param a_pu64 Where to return the opcode quad word.
2925 * @remark Implicitly references pVCpu.
2926 */
2927#ifndef IEM_WITH_SETJMP
2928# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2929 do \
2930 { \
2931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2932 if (rcStrict2 != VINF_SUCCESS) \
2933 return rcStrict2; \
2934 } while (0)
2935#else
2936# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2937#endif
2938
2939#ifndef IEM_WITH_SETJMP
2940
2941/**
2942 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2943 *
2944 * @returns Strict VBox status code.
2945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2946 * @param pu64 Where to return the opcode qword.
2947 */
2948DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2949{
2950 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2951 if (rcStrict == VINF_SUCCESS)
2952 {
2953 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2954# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2955 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2956# else
2957 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2958 pVCpu->iem.s.abOpcode[offOpcode + 1],
2959 pVCpu->iem.s.abOpcode[offOpcode + 2],
2960 pVCpu->iem.s.abOpcode[offOpcode + 3],
2961 pVCpu->iem.s.abOpcode[offOpcode + 4],
2962 pVCpu->iem.s.abOpcode[offOpcode + 5],
2963 pVCpu->iem.s.abOpcode[offOpcode + 6],
2964 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2965# endif
2966 pVCpu->iem.s.offOpcode = offOpcode + 8;
2967 }
2968 else
2969 *pu64 = 0;
2970 return rcStrict;
2971}
2972
2973
2974/**
2975 * Fetches the next opcode qword.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2984 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2985 {
2986# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2987 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2988# else
2989 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2990 pVCpu->iem.s.abOpcode[offOpcode + 1],
2991 pVCpu->iem.s.abOpcode[offOpcode + 2],
2992 pVCpu->iem.s.abOpcode[offOpcode + 3],
2993 pVCpu->iem.s.abOpcode[offOpcode + 4],
2994 pVCpu->iem.s.abOpcode[offOpcode + 5],
2995 pVCpu->iem.s.abOpcode[offOpcode + 6],
2996 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2997# endif
2998 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2999 return VINF_SUCCESS;
3000 }
3001 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3002}
3003
3004#else /* IEM_WITH_SETJMP */
3005
3006/**
3007 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3008 *
3009 * @returns The opcode qword.
3010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3013{
3014# ifdef IEM_WITH_CODE_TLB
3015 uint64_t u64;
3016 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3017 return u64;
3018# else
3019 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3020 if (rcStrict == VINF_SUCCESS)
3021 {
3022 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3023 pVCpu->iem.s.offOpcode = offOpcode + 8;
3024# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3025 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3026# else
3027 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3028 pVCpu->iem.s.abOpcode[offOpcode + 1],
3029 pVCpu->iem.s.abOpcode[offOpcode + 2],
3030 pVCpu->iem.s.abOpcode[offOpcode + 3],
3031 pVCpu->iem.s.abOpcode[offOpcode + 4],
3032 pVCpu->iem.s.abOpcode[offOpcode + 5],
3033 pVCpu->iem.s.abOpcode[offOpcode + 6],
3034 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3035# endif
3036 }
3037 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3038# endif
3039}
3040
3041
3042/**
3043 * Fetches the next opcode qword, longjmp on error.
3044 *
3045 * @returns The opcode qword.
3046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3047 */
3048DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3049{
3050# ifdef IEM_WITH_CODE_TLB
3051 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3052 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3053 if (RT_LIKELY( pbBuf != NULL
3054 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3055 {
3056 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 return *(uint64_t const *)&pbBuf[offBuf];
3059# else
3060 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3061 pbBuf[offBuf + 1],
3062 pbBuf[offBuf + 2],
3063 pbBuf[offBuf + 3],
3064 pbBuf[offBuf + 4],
3065 pbBuf[offBuf + 5],
3066 pbBuf[offBuf + 6],
3067 pbBuf[offBuf + 7]);
3068# endif
3069 }
3070# else
3071 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3072 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3073 {
3074 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3075# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3076 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3077# else
3078 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3079 pVCpu->iem.s.abOpcode[offOpcode + 1],
3080 pVCpu->iem.s.abOpcode[offOpcode + 2],
3081 pVCpu->iem.s.abOpcode[offOpcode + 3],
3082 pVCpu->iem.s.abOpcode[offOpcode + 4],
3083 pVCpu->iem.s.abOpcode[offOpcode + 5],
3084 pVCpu->iem.s.abOpcode[offOpcode + 6],
3085 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3086# endif
3087 }
3088# endif
3089 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3090}
3091
3092#endif /* IEM_WITH_SETJMP */
3093
3094/**
3095 * Fetches the next opcode quad word, returns automatically on failure.
3096 *
3097 * @param a_pu64 Where to return the opcode quad word.
3098 * @remark Implicitly references pVCpu.
3099 */
3100#ifndef IEM_WITH_SETJMP
3101# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3102 do \
3103 { \
3104 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3105 if (rcStrict2 != VINF_SUCCESS) \
3106 return rcStrict2; \
3107 } while (0)
3108#else
3109# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3110#endif
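/*
 * Illustrative note on the two fetch paths used by the getters above: with
 * IEM_USE_UNALIGNED_DATA_ACCESS the value is loaded straight from the buffer,
 * otherwise it is assembled with RT_MAKE_U16/U32/U64_FROM_U8.  Both yield the
 * same little-endian interpretation of the opcode stream, as x86 immediates
 * require:
 */
#if 0 /* illustration only */
    uint8_t const abBytes[4] = { 0x78, 0x56, 0x34, 0x12 };
    Assert(RT_MAKE_U32_FROM_U8(abBytes[0], abBytes[1], abBytes[2], abBytes[3]) == UINT32_C(0x12345678));
#endif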
3111
3112
3113/** @name Misc Worker Functions.
3114 * @{
3115 */
3116
3117/* Currently used only with nested hw.virt. */
3118#ifdef VBOX_WITH_NESTED_HWVIRT
3119/**
3120 * Initiates a CPU shutdown sequence.
3121 *
3122 * @returns Strict VBox status code.
3123 * @param pVCpu The cross context virtual CPU structure of the
3124 * calling thread.
3125 */
3126IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3127{
3128 RT_NOREF_PV(pVCpu);
3129 /** @todo Probably need a separate error code and handling for this to
3130 * distinguish it from the regular triple fault. */
3131 return VINF_EM_TRIPLE_FAULT;
3132}
3133#endif
3134
3135/**
3136 * Validates a new SS segment.
3137 *
3138 * @returns VBox strict status code.
3139 * @param pVCpu The cross context virtual CPU structure of the
3140 * calling thread.
3141 * @param pCtx The CPU context.
3142 * @param NewSS The new SS selector.
3143 * @param uCpl The CPL to load the stack for.
3144 * @param pDesc Where to return the descriptor.
3145 */
3146IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3147{
3148 NOREF(pCtx);
3149
3150 /* Null selectors are not allowed (we're not called for dispatching
3151 interrupts with SS=0 in long mode). */
3152 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3153 {
3154 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3155 return iemRaiseTaskSwitchFault0(pVCpu);
3156 }
3157
3158 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3159 if ((NewSS & X86_SEL_RPL) != uCpl)
3160 {
3161 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3162 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3163 }
3164
3165 /*
3166 * Read the descriptor.
3167 */
3168 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3169 if (rcStrict != VINF_SUCCESS)
3170 return rcStrict;
3171
3172 /*
3173 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3174 */
3175 if (!pDesc->Legacy.Gen.u1DescType)
3176 {
3177 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3178 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3179 }
3180
3181 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3182 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3183 {
3184 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3185 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3186 }
3187 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3188 {
3189 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3190 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3191 }
3192
3193 /* Is it there? */
3194 /** @todo testcase: Is this checked before the canonical / limit check below? */
3195 if (!pDesc->Legacy.Gen.u1Present)
3196 {
3197 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3198 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3199 }
3200
3201 return VINF_SUCCESS;
3202}
3203
3204
3205/**
3206 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3207 * not.
3208 *
3209 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3210 * @param a_pCtx The CPU context.
3211 */
3212#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3213# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3214 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3215 ? (a_pCtx)->eflags.u \
3216 : CPUMRawGetEFlags(a_pVCpu) )
3217#else
3218# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3219 ( (a_pCtx)->eflags.u )
3220#endif
3221
3222/**
3223 * Updates the EFLAGS in the correct manner wrt. PATM.
3224 *
3225 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3226 * @param a_pCtx The CPU context.
3227 * @param a_fEfl The new EFLAGS.
3228 */
3229#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3230# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3231 do { \
3232 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3233 (a_pCtx)->eflags.u = (a_fEfl); \
3234 else \
3235 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3236 } while (0)
3237#else
3238# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3239 do { \
3240 (a_pCtx)->eflags.u = (a_fEfl); \
3241 } while (0)
3242#endif
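
/*
 * Illustrative sketch only: the read-modify-write pattern the two macros above
 * are meant for, mirroring what the exception delivery code further down does
 * when it clears IF (pVCpu and pCtx being the usual IEM locals):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);   // PATM aware read
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);             // PATM aware write back
 */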
3243
3244
3245/** @} */
3246
3247/** @name Raising Exceptions.
3248 *
3249 * @{
3250 */
3251
3252/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3253 * @{ */
3254/** CPU exception. */
3255#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3256/** External interrupt (from PIC, APIC, whatever). */
3257#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3258/** Software interrupt (int or into, not bound).
3259 * Returns to the following instruction. */
3260#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3261/** Takes an error code. */
3262#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3263/** Takes a CR2. */
3264#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3265/** Generated by the breakpoint instruction. */
3266#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3267/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3268#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3269/** @} */
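
/*
 * Illustrative sketch only: a page-fault style flag combination for the
 * iemRaiseXcptOrInt family - a CPU exception that carries both an error code
 * and a CR2 value (the local name is made up):
 *
 *      uint32_t const fPfFlags = IEM_XCPT_FLAGS_T_CPU_XCPT
 *                              | IEM_XCPT_FLAGS_ERR
 *                              | IEM_XCPT_FLAGS_CR2;
 */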
3270
3271
3272/**
3273 * Loads the specified stack far pointer from the TSS.
3274 *
3275 * @returns VBox strict status code.
3276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3277 * @param pCtx The CPU context.
3278 * @param uCpl The CPL to load the stack for.
3279 * @param pSelSS Where to return the new stack segment.
3280 * @param puEsp Where to return the new stack pointer.
3281 */
3282IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3283 PRTSEL pSelSS, uint32_t *puEsp)
3284{
3285 VBOXSTRICTRC rcStrict;
3286 Assert(uCpl < 4);
3287
3288 switch (pCtx->tr.Attr.n.u4Type)
3289 {
3290 /*
3291 * 16-bit TSS (X86TSS16).
3292 */
3293 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3294 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3295 {
3296 uint32_t off = uCpl * 4 + 2;
3297 if (off + 4 <= pCtx->tr.u32Limit)
3298 {
3299 /** @todo check actual access pattern here. */
3300 uint32_t u32Tmp = 0; /* gcc maybe... */
3301 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3302 if (rcStrict == VINF_SUCCESS)
3303 {
3304 *puEsp = RT_LOWORD(u32Tmp);
3305 *pSelSS = RT_HIWORD(u32Tmp);
3306 return VINF_SUCCESS;
3307 }
3308 }
3309 else
3310 {
3311 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3312 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3313 }
3314 break;
3315 }
3316
3317 /*
3318 * 32-bit TSS (X86TSS32).
3319 */
3320 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3321 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3322 {
3323 uint32_t off = uCpl * 8 + 4;
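            /* Worked example (assuming the standard X86TSS32 layout): uCpl=1 gives
               off = 1*8 + 4 = 12, i.e. the esp1 field, so the 64-bit fetch below
               returns esp1 in the low dword and ss1 in the word above it. */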
3324 if (off + 7 <= pCtx->tr.u32Limit)
3325 {
3326                /** @todo check actual access pattern here. */
3327 uint64_t u64Tmp;
3328 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3329 if (rcStrict == VINF_SUCCESS)
3330 {
3331 *puEsp = u64Tmp & UINT32_MAX;
3332 *pSelSS = (RTSEL)(u64Tmp >> 32);
3333 return VINF_SUCCESS;
3334 }
3335 }
3336 else
3337 {
3338 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3339 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3340 }
3341 break;
3342 }
3343
3344 default:
3345 AssertFailed();
3346 rcStrict = VERR_IEM_IPE_4;
3347 break;
3348 }
3349
3350 *puEsp = 0; /* make gcc happy */
3351 *pSelSS = 0; /* make gcc happy */
3352 return rcStrict;
3353}
3354
3355
3356/**
3357 * Loads the specified stack pointer from the 64-bit TSS.
3358 *
3359 * @returns VBox strict status code.
3360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3361 * @param pCtx The CPU context.
3362 * @param uCpl The CPL to load the stack for.
3363 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3364 * @param puRsp Where to return the new stack pointer.
3365 */
3366IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3367{
3368 Assert(uCpl < 4);
3369 Assert(uIst < 8);
3370 *puRsp = 0; /* make gcc happy */
3371
3372 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3373
3374 uint32_t off;
3375 if (uIst)
3376 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3377 else
3378 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
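    /* Worked example (assuming the standard X86TSS64 layout): rsp0..rsp2 and
       ist1..ist7 are contiguous arrays of 64-bit fields, so uIst=2 yields
       off = RT_OFFSETOF(X86TSS64, ist1) + 8 (ist2), while uIst=0 with uCpl=1
       yields off = RT_OFFSETOF(X86TSS64, rsp0) + 8 (rsp1). */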
3379 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3380 {
3381 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3382 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3383 }
3384
3385 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3386}
3387
3388
3389/**
3390 * Adjust the CPU state according to the exception being raised.
3391 *
3392 * @param pCtx The CPU context.
3393 * @param u8Vector The exception that has been raised.
3394 */
3395DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3396{
3397 switch (u8Vector)
3398 {
3399 case X86_XCPT_DB:
3400 pCtx->dr[7] &= ~X86_DR7_GD;
3401 break;
3402 /** @todo Read the AMD and Intel exception reference... */
3403 }
3404}
3405
3406
3407/**
3408 * Implements exceptions and interrupts for real mode.
3409 *
3410 * @returns VBox strict status code.
3411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3412 * @param pCtx The CPU context.
3413 * @param cbInstr The number of bytes to offset rIP by in the return
3414 * address.
3415 * @param u8Vector The interrupt / exception vector number.
3416 * @param fFlags The flags.
3417 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3418 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3419 */
3420IEM_STATIC VBOXSTRICTRC
3421iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3422 PCPUMCTX pCtx,
3423 uint8_t cbInstr,
3424 uint8_t u8Vector,
3425 uint32_t fFlags,
3426 uint16_t uErr,
3427 uint64_t uCr2)
3428{
3429 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3430 NOREF(uErr); NOREF(uCr2);
3431
3432 /*
3433 * Read the IDT entry.
3434 */
3435 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3436 {
3437 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3438 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3439 }
3440 RTFAR16 Idte;
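    /* Worked example: for vector 0x08 the real-mode IVT entry is the 4 bytes at
       idtr.pIdt + 0x20, read as a far pointer with the offset word first and the
       segment word second (matching the RTFAR16 layout used here). */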
3441 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3442 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3443 return rcStrict;
3444
3445 /*
3446 * Push the stack frame.
3447 */
3448 uint16_t *pu16Frame;
3449 uint64_t uNewRsp;
3450 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453
3454 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3455#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3456 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3457 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3458 fEfl |= UINT16_C(0xf000);
3459#endif
3460 pu16Frame[2] = (uint16_t)fEfl;
3461 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3462 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3463 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3464 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3465 return rcStrict;
3466
3467 /*
3468 * Load the vector address into cs:ip and make exception specific state
3469 * adjustments.
3470 */
3471 pCtx->cs.Sel = Idte.sel;
3472 pCtx->cs.ValidSel = Idte.sel;
3473 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3474 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3475 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3476 pCtx->rip = Idte.off;
3477 fEfl &= ~X86_EFL_IF;
3478 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3479
3480 /** @todo do we actually do this in real mode? */
3481 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3482 iemRaiseXcptAdjustState(pCtx, u8Vector);
3483
3484 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3485}
3486
3487
3488/**
3489 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3490 *
3491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3492 * @param pSReg Pointer to the segment register.
3493 */
3494IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3495{
3496 pSReg->Sel = 0;
3497 pSReg->ValidSel = 0;
3498 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3499 {
3500 /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
3501 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3502 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3503 }
3504 else
3505 {
3506 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3507 /** @todo check this on AMD-V */
3508 pSReg->u64Base = 0;
3509 pSReg->u32Limit = 0;
3510 }
3511}
3512
3513
3514/**
3515 * Loads a segment selector during a task switch in V8086 mode.
3516 *
3517 * @param pSReg Pointer to the segment register.
3518 * @param uSel The selector value to load.
3519 */
3520IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3521{
3522 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3523 pSReg->Sel = uSel;
3524 pSReg->ValidSel = uSel;
3525 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3526 pSReg->u64Base = uSel << 4;
3527 pSReg->u32Limit = 0xffff;
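    /* The 0xf3 attribute value below decodes to P=1, DPL=3, S=1 (code/data) and
       type=3 (read/write data, accessed) - the fixed attributes of a V8086 mode
       segment. */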
3528 pSReg->Attr.u = 0xf3;
3529}
3530
3531
3532/**
3533 * Loads a NULL data selector into a selector register, both the hidden and
3534 * visible parts, in protected mode.
3535 *
3536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3537 * @param pSReg Pointer to the segment register.
3538 * @param uRpl The RPL.
3539 */
3540IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3541{
3542 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3543 * data selector in protected mode. */
3544 pSReg->Sel = uRpl;
3545 pSReg->ValidSel = uRpl;
3546 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3547 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3548 {
3549 /* VT-x (Intel 3960x) observed doing something like this. */
3550 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3551 pSReg->u32Limit = UINT32_MAX;
3552 pSReg->u64Base = 0;
3553 }
3554 else
3555 {
3556 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3557 pSReg->u32Limit = 0;
3558 pSReg->u64Base = 0;
3559 }
3560}
3561
3562
3563/**
3564 * Loads a segment selector during a task switch in protected mode.
3565 *
3566 * In this task switch scenario, we would throw \#TS exceptions rather than
3567 * \#GPs.
3568 *
3569 * @returns VBox strict status code.
3570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3571 * @param pSReg Pointer to the segment register.
3572 * @param uSel The new selector value.
3573 *
3574 * @remarks This does _not_ handle CS or SS.
3575 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3576 */
3577IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3578{
3579 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3580
3581 /* Null data selector. */
3582 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3583 {
3584 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3585 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3586 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3587 return VINF_SUCCESS;
3588 }
3589
3590 /* Fetch the descriptor. */
3591 IEMSELDESC Desc;
3592 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3593 if (rcStrict != VINF_SUCCESS)
3594 {
3595 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3596 VBOXSTRICTRC_VAL(rcStrict)));
3597 return rcStrict;
3598 }
3599
3600 /* Must be a data segment or readable code segment. */
3601 if ( !Desc.Legacy.Gen.u1DescType
3602 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3603 {
3604 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3605 Desc.Legacy.Gen.u4Type));
3606 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3607 }
3608
3609 /* Check privileges for data segments and non-conforming code segments. */
3610 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3611 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3612 {
3613 /* The RPL and the new CPL must be less than or equal to the DPL. */
3614 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3615 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3616 {
3617 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3618 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3619 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3620 }
3621 }
3622
3623 /* Is it there? */
3624 if (!Desc.Legacy.Gen.u1Present)
3625 {
3626 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3627 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3628 }
3629
3630 /* The base and limit. */
3631 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3632 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3633
3634 /*
3635 * Ok, everything checked out fine. Now set the accessed bit before
3636 * committing the result into the registers.
3637 */
3638 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3639 {
3640 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3641 if (rcStrict != VINF_SUCCESS)
3642 return rcStrict;
3643 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3644 }
3645
3646 /* Commit */
3647 pSReg->Sel = uSel;
3648 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3649 pSReg->u32Limit = cbLimit;
3650 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3651 pSReg->ValidSel = uSel;
3652 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3653 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3654 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3655
3656 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3657 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3658 return VINF_SUCCESS;
3659}
3660
3661
3662/**
3663 * Performs a task switch.
3664 *
3665 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3666 * caller is responsible for performing the necessary checks (like DPL, TSS
3667 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3668 * reference for JMP, CALL, IRET.
3669 *
3670 * If the task switch is due to a software interrupt or hardware exception,
3671 * the caller is responsible for validating the TSS selector and descriptor. See
3672 * Intel Instruction reference for INT n.
3673 *
3674 * @returns VBox strict status code.
3675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3676 * @param pCtx The CPU context.
3677 * @param enmTaskSwitch What caused this task switch.
3678 * @param uNextEip The EIP effective after the task switch.
3679 * @param fFlags The flags.
3680 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3681 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3682 * @param SelTSS The TSS selector of the new task.
3683 * @param pNewDescTSS Pointer to the new TSS descriptor.
3684 */
3685IEM_STATIC VBOXSTRICTRC
3686iemTaskSwitch(PVMCPU pVCpu,
3687 PCPUMCTX pCtx,
3688 IEMTASKSWITCH enmTaskSwitch,
3689 uint32_t uNextEip,
3690 uint32_t fFlags,
3691 uint16_t uErr,
3692 uint64_t uCr2,
3693 RTSEL SelTSS,
3694 PIEMSELDESC pNewDescTSS)
3695{
3696 Assert(!IEM_IS_REAL_MODE(pVCpu));
3697 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3698
3699 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3700 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3701 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3702 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3703 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3704
3705 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3706 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3707
3708 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3709 fIsNewTSS386, pCtx->eip, uNextEip));
3710
3711 /* Update CR2 in case it's a page-fault. */
3712 /** @todo This should probably be done much earlier in IEM/PGM. See
3713 * @bugref{5653#c49}. */
3714 if (fFlags & IEM_XCPT_FLAGS_CR2)
3715 pCtx->cr2 = uCr2;
3716
3717 /*
3718 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3719 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3720 */
3721 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3722 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3723 if (uNewTSSLimit < uNewTSSLimitMin)
3724 {
3725 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3726 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3727 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3728 }
3729
3730 /*
3731 * Check the current TSS limit. The last bytes written to the current TSS during the
3732 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3733 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3734 *
3735 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3736 * end up with smaller than "legal" TSS limits.
3737 */
3738 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3739 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3740 if (uCurTSSLimit < uCurTSSLimitMin)
3741 {
3742 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3743 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3744 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3745 }
3746
3747 /*
3748 * Verify that the new TSS can be accessed and map it. Map only the required contents
3749 * and not the entire TSS.
3750 */
3751 void *pvNewTSS;
3752 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3753 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3754 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3755 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3756 * not perform correct translation if this happens. See Intel spec. 7.2.1
3757 * "Task-State Segment" */
3758 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3759 if (rcStrict != VINF_SUCCESS)
3760 {
3761 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3762 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3763 return rcStrict;
3764 }
3765
3766 /*
3767 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3768 */
3769 uint32_t u32EFlags = pCtx->eflags.u32;
3770 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3771 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3772 {
3773 PX86DESC pDescCurTSS;
3774 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3775 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3776 if (rcStrict != VINF_SUCCESS)
3777 {
3778 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3779 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3780 return rcStrict;
3781 }
3782
3783 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3784 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3785 if (rcStrict != VINF_SUCCESS)
3786 {
3787 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3788 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3789 return rcStrict;
3790 }
3791
3792 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3793 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3794 {
3795 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3796 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3797 u32EFlags &= ~X86_EFL_NT;
3798 }
3799 }
3800
3801 /*
3802 * Save the CPU state into the current TSS.
3803 */
3804 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3805 if (GCPtrNewTSS == GCPtrCurTSS)
3806 {
3807 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3808 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3809 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3810 }
3811 if (fIsNewTSS386)
3812 {
3813 /*
3814 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3815 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3816 */
3817 void *pvCurTSS32;
3818 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3819 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3820 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3821 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3822 if (rcStrict != VINF_SUCCESS)
3823 {
3824 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3825 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3826 return rcStrict;
3827 }
3828
3829 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
3830 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3831 pCurTSS32->eip = uNextEip;
3832 pCurTSS32->eflags = u32EFlags;
3833 pCurTSS32->eax = pCtx->eax;
3834 pCurTSS32->ecx = pCtx->ecx;
3835 pCurTSS32->edx = pCtx->edx;
3836 pCurTSS32->ebx = pCtx->ebx;
3837 pCurTSS32->esp = pCtx->esp;
3838 pCurTSS32->ebp = pCtx->ebp;
3839 pCurTSS32->esi = pCtx->esi;
3840 pCurTSS32->edi = pCtx->edi;
3841 pCurTSS32->es = pCtx->es.Sel;
3842 pCurTSS32->cs = pCtx->cs.Sel;
3843 pCurTSS32->ss = pCtx->ss.Sel;
3844 pCurTSS32->ds = pCtx->ds.Sel;
3845 pCurTSS32->fs = pCtx->fs.Sel;
3846 pCurTSS32->gs = pCtx->gs.Sel;
3847
3848 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3849 if (rcStrict != VINF_SUCCESS)
3850 {
3851 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3852 VBOXSTRICTRC_VAL(rcStrict)));
3853 return rcStrict;
3854 }
3855 }
3856 else
3857 {
3858 /*
3859 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3860 */
3861 void *pvCurTSS16;
3862 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3863 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3864 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3865 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3866 if (rcStrict != VINF_SUCCESS)
3867 {
3868 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3869 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3870 return rcStrict;
3871 }
3872
3873 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
3874 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3875 pCurTSS16->ip = uNextEip;
3876 pCurTSS16->flags = u32EFlags;
3877 pCurTSS16->ax = pCtx->ax;
3878 pCurTSS16->cx = pCtx->cx;
3879 pCurTSS16->dx = pCtx->dx;
3880 pCurTSS16->bx = pCtx->bx;
3881 pCurTSS16->sp = pCtx->sp;
3882 pCurTSS16->bp = pCtx->bp;
3883 pCurTSS16->si = pCtx->si;
3884 pCurTSS16->di = pCtx->di;
3885 pCurTSS16->es = pCtx->es.Sel;
3886 pCurTSS16->cs = pCtx->cs.Sel;
3887 pCurTSS16->ss = pCtx->ss.Sel;
3888 pCurTSS16->ds = pCtx->ds.Sel;
3889
3890 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3891 if (rcStrict != VINF_SUCCESS)
3892 {
3893 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3894 VBOXSTRICTRC_VAL(rcStrict)));
3895 return rcStrict;
3896 }
3897 }
3898
3899 /*
3900 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3901 */
3902 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3903 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3904 {
3905 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3906 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3907 pNewTSS->selPrev = pCtx->tr.Sel;
3908 }
3909
3910 /*
3911 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3912 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3913 */
3914 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3915 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3916 bool fNewDebugTrap;
3917 if (fIsNewTSS386)
3918 {
3919 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3920 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3921 uNewEip = pNewTSS32->eip;
3922 uNewEflags = pNewTSS32->eflags;
3923 uNewEax = pNewTSS32->eax;
3924 uNewEcx = pNewTSS32->ecx;
3925 uNewEdx = pNewTSS32->edx;
3926 uNewEbx = pNewTSS32->ebx;
3927 uNewEsp = pNewTSS32->esp;
3928 uNewEbp = pNewTSS32->ebp;
3929 uNewEsi = pNewTSS32->esi;
3930 uNewEdi = pNewTSS32->edi;
3931 uNewES = pNewTSS32->es;
3932 uNewCS = pNewTSS32->cs;
3933 uNewSS = pNewTSS32->ss;
3934 uNewDS = pNewTSS32->ds;
3935 uNewFS = pNewTSS32->fs;
3936 uNewGS = pNewTSS32->gs;
3937 uNewLdt = pNewTSS32->selLdt;
3938 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3939 }
3940 else
3941 {
3942 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3943 uNewCr3 = 0;
3944 uNewEip = pNewTSS16->ip;
3945 uNewEflags = pNewTSS16->flags;
3946 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3947 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3948 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3949 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3950 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3951 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3952 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3953 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3954 uNewES = pNewTSS16->es;
3955 uNewCS = pNewTSS16->cs;
3956 uNewSS = pNewTSS16->ss;
3957 uNewDS = pNewTSS16->ds;
3958 uNewFS = 0;
3959 uNewGS = 0;
3960 uNewLdt = pNewTSS16->selLdt;
3961 fNewDebugTrap = false;
3962 }
3963
3964 if (GCPtrNewTSS == GCPtrCurTSS)
3965 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3966 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3967
3968 /*
3969 * We're done accessing the new TSS.
3970 */
3971 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3972 if (rcStrict != VINF_SUCCESS)
3973 {
3974 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3975 return rcStrict;
3976 }
3977
3978 /*
3979 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3980 */
3981 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3982 {
3983 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3984 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3985 if (rcStrict != VINF_SUCCESS)
3986 {
3987 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3988 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3989 return rcStrict;
3990 }
3991
3992 /* Check that the descriptor indicates the new TSS is available (not busy). */
3993 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3994 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3995 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3996
3997 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3998 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3999 if (rcStrict != VINF_SUCCESS)
4000 {
4001 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4002 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4003 return rcStrict;
4004 }
4005 }
4006
4007 /*
4008 * From this point on, we're technically in the new task. Exceptions raised from here on are
4009 * deferred until the task switch completes, and are delivered before any instructions in the new task are executed.
4010 */
4011 pCtx->tr.Sel = SelTSS;
4012 pCtx->tr.ValidSel = SelTSS;
4013 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4014 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4015 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4016 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4017 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4018
4019 /* Set the busy bit in TR. */
4020 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4021 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4022 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4023 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4024 {
4025 uNewEflags |= X86_EFL_NT;
4026 }
4027
4028 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4029 pCtx->cr0 |= X86_CR0_TS;
4030 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4031
4032 pCtx->eip = uNewEip;
4033 pCtx->eax = uNewEax;
4034 pCtx->ecx = uNewEcx;
4035 pCtx->edx = uNewEdx;
4036 pCtx->ebx = uNewEbx;
4037 pCtx->esp = uNewEsp;
4038 pCtx->ebp = uNewEbp;
4039 pCtx->esi = uNewEsi;
4040 pCtx->edi = uNewEdi;
4041
4042 uNewEflags &= X86_EFL_LIVE_MASK;
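    /* Keep only the architecturally live flag bits and force the reserved
       always-one bit (bit 1) before loading the new EFLAGS image. */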
4043 uNewEflags |= X86_EFL_RA1_MASK;
4044 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4045
4046 /*
4047 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4048 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4049 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4050 */
4051 pCtx->es.Sel = uNewES;
4052 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4053
4054 pCtx->cs.Sel = uNewCS;
4055 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4056
4057 pCtx->ss.Sel = uNewSS;
4058 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4059
4060 pCtx->ds.Sel = uNewDS;
4061 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4062
4063 pCtx->fs.Sel = uNewFS;
4064 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4065
4066 pCtx->gs.Sel = uNewGS;
4067 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4068 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4069
4070 pCtx->ldtr.Sel = uNewLdt;
4071 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4072 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4073 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4074
4075 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4076 {
4077 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4078 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4079 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4080 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4081 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4082 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4083 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4084 }
4085
4086 /*
4087 * Switch CR3 for the new task.
4088 */
4089 if ( fIsNewTSS386
4090 && (pCtx->cr0 & X86_CR0_PG))
4091 {
4092 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4093 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4094 {
4095 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4096 AssertRCSuccessReturn(rc, rc);
4097 }
4098 else
4099 pCtx->cr3 = uNewCr3;
4100
4101 /* Inform PGM. */
4102 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4103 {
4104 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4105 AssertRCReturn(rc, rc);
4106 /* ignore informational status codes */
4107 }
4108 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4109 }
4110
4111 /*
4112 * Switch LDTR for the new task.
4113 */
4114 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4115 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4116 else
4117 {
4118 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4119
4120 IEMSELDESC DescNewLdt;
4121 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4122 if (rcStrict != VINF_SUCCESS)
4123 {
4124 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4125 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4126 return rcStrict;
4127 }
4128 if ( !DescNewLdt.Legacy.Gen.u1Present
4129 || DescNewLdt.Legacy.Gen.u1DescType
4130 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4131 {
4132 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4133 uNewLdt, DescNewLdt.Legacy.u));
4134 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4135 }
4136
4137 pCtx->ldtr.ValidSel = uNewLdt;
4138 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4139 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4140 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4141 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4142 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4143 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4145 }
4146
4147 IEMSELDESC DescSS;
4148 if (IEM_IS_V86_MODE(pVCpu))
4149 {
4150 pVCpu->iem.s.uCpl = 3;
4151 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4152 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4153 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4154 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4155 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4156 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4157
4158 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4159 DescSS.Legacy.u = 0;
4160 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4161 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4162 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4163 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4164 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4165 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4166 DescSS.Legacy.Gen.u2Dpl = 3;
4167 }
4168 else
4169 {
4170 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4171
4172 /*
4173 * Load the stack segment for the new task.
4174 */
4175 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4176 {
4177 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4178 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4179 }
4180
4181 /* Fetch the descriptor. */
4182 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4183 if (rcStrict != VINF_SUCCESS)
4184 {
4185 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4186 VBOXSTRICTRC_VAL(rcStrict)));
4187 return rcStrict;
4188 }
4189
4190 /* SS must be a data segment and writable. */
4191 if ( !DescSS.Legacy.Gen.u1DescType
4192 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4193 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4194 {
4195 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4196 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4198 }
4199
4200 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4201 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4202 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4203 {
4204 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4205 uNewCpl));
4206 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4207 }
4208
4209 /* Is it there? */
4210 if (!DescSS.Legacy.Gen.u1Present)
4211 {
4212 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4213 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4214 }
4215
4216 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4217 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4218
4219 /* Set the accessed bit before committing the result into SS. */
4220 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4221 {
4222 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4223 if (rcStrict != VINF_SUCCESS)
4224 return rcStrict;
4225 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4226 }
4227
4228 /* Commit SS. */
4229 pCtx->ss.Sel = uNewSS;
4230 pCtx->ss.ValidSel = uNewSS;
4231 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4232 pCtx->ss.u32Limit = cbLimit;
4233 pCtx->ss.u64Base = u64Base;
4234 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4236
4237 /* CPL has changed, update IEM before loading rest of segments. */
4238 pVCpu->iem.s.uCpl = uNewCpl;
4239
4240 /*
4241 * Load the data segments for the new task.
4242 */
4243 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4244 if (rcStrict != VINF_SUCCESS)
4245 return rcStrict;
4246 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4247 if (rcStrict != VINF_SUCCESS)
4248 return rcStrict;
4249 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4250 if (rcStrict != VINF_SUCCESS)
4251 return rcStrict;
4252 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4253 if (rcStrict != VINF_SUCCESS)
4254 return rcStrict;
4255
4256 /*
4257 * Load the code segment for the new task.
4258 */
4259 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4260 {
4261 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4262 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4263 }
4264
4265 /* Fetch the descriptor. */
4266 IEMSELDESC DescCS;
4267 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4268 if (rcStrict != VINF_SUCCESS)
4269 {
4270 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4271 return rcStrict;
4272 }
4273
4274 /* CS must be a code segment. */
4275 if ( !DescCS.Legacy.Gen.u1DescType
4276 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4277 {
4278 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4279 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4280 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4281 }
4282
4283 /* For conforming CS, DPL must be less than or equal to the RPL. */
4284 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4285 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4286 {
4287 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4288 DescCS.Legacy.Gen.u2Dpl));
4289 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4290 }
4291
4292 /* For non-conforming CS, DPL must match RPL. */
4293 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4294 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4295 {
4296 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4297 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4298 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4299 }
4300
4301 /* Is it there? */
4302 if (!DescCS.Legacy.Gen.u1Present)
4303 {
4304 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4305 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4306 }
4307
4308 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4309 u64Base = X86DESC_BASE(&DescCS.Legacy);
4310
4311 /* Set the accessed bit before committing the result into CS. */
4312 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4313 {
4314 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4315 if (rcStrict != VINF_SUCCESS)
4316 return rcStrict;
4317 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4318 }
4319
4320 /* Commit CS. */
4321 pCtx->cs.Sel = uNewCS;
4322 pCtx->cs.ValidSel = uNewCS;
4323 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4324 pCtx->cs.u32Limit = cbLimit;
4325 pCtx->cs.u64Base = u64Base;
4326 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4328 }
4329
4330 /** @todo Debug trap. */
4331 if (fIsNewTSS386 && fNewDebugTrap)
4332 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4333
4334 /*
4335 * Construct the error code masks based on what caused this task switch.
4336 * See Intel Instruction reference for INT.
4337 */
4338 uint16_t uExt;
4339 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4340 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4341 {
4342 uExt = 1;
4343 }
4344 else
4345 uExt = 0;
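    /* uExt becomes the EXT bit (bit 0) of any error code raised below: set for
       hardware interrupts and exceptions, clear for software INT n. */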
4346
4347 /*
4348 * Push any error code on to the new stack.
4349 */
4350 if (fFlags & IEM_XCPT_FLAGS_ERR)
4351 {
4352 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4353 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4354 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4355
4356 /* Check that there is sufficient space on the stack. */
4357 /** @todo Factor out segment limit checking for normal/expand down segments
4358 * into a separate function. */
4359 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4360 {
4361 if ( pCtx->esp - 1 > cbLimitSS
4362 || pCtx->esp < cbStackFrame)
4363 {
4364 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4365 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4366 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4367 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4368 }
4369 }
4370 else
4371 {
4372 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4373 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4374 {
4375 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4376 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4377 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4378 }
4379 }
4380
4381
4382 if (fIsNewTSS386)
4383 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4384 else
4385 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4386 if (rcStrict != VINF_SUCCESS)
4387 {
4388 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4389 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4390 return rcStrict;
4391 }
4392 }
4393
4394 /* Check the new EIP against the new CS limit. */
4395 if (pCtx->eip > pCtx->cs.u32Limit)
4396 {
4397 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4398 pCtx->eip, pCtx->cs.u32Limit));
4399 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4400 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4401 }
4402
4403 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4404 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4405}
4406
4407
4408/**
4409 * Implements exceptions and interrupts for protected mode.
4410 *
4411 * @returns VBox strict status code.
4412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4413 * @param pCtx The CPU context.
4414 * @param cbInstr The number of bytes to offset rIP by in the return
4415 * address.
4416 * @param u8Vector The interrupt / exception vector number.
4417 * @param fFlags The flags.
4418 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4419 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4420 */
4421IEM_STATIC VBOXSTRICTRC
4422iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4423 PCPUMCTX pCtx,
4424 uint8_t cbInstr,
4425 uint8_t u8Vector,
4426 uint32_t fFlags,
4427 uint16_t uErr,
4428 uint64_t uCr2)
4429{
4430 /*
4431 * Read the IDT entry.
4432 */
4433 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4434 {
4435 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4436 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4437 }
4438 X86DESC Idte;
4439 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4440 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4441 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4442 return rcStrict;
4443 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4444 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4445 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4446
4447 /*
4448 * Check the descriptor type, DPL and such.
4449 * ASSUMES this is done in the same order as described for call-gate calls.
4450 */
4451 if (Idte.Gate.u1DescType)
4452 {
4453 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4454 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4455 }
4456 bool fTaskGate = false;
4457 uint8_t f32BitGate = true;
4458 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4459 switch (Idte.Gate.u4Type)
4460 {
4461 case X86_SEL_TYPE_SYS_UNDEFINED:
4462 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4463 case X86_SEL_TYPE_SYS_LDT:
4464 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4465 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4466 case X86_SEL_TYPE_SYS_UNDEFINED2:
4467 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4468 case X86_SEL_TYPE_SYS_UNDEFINED3:
4469 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4470 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4471 case X86_SEL_TYPE_SYS_UNDEFINED4:
4472 {
4473 /** @todo check what actually happens when the type is wrong...
4474 * esp. call gates. */
4475 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4476 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4477 }
4478
4479 case X86_SEL_TYPE_SYS_286_INT_GATE:
4480 f32BitGate = false;
4481 /* fall thru */
4482 case X86_SEL_TYPE_SYS_386_INT_GATE:
4483 fEflToClear |= X86_EFL_IF;
4484 break;
4485
4486 case X86_SEL_TYPE_SYS_TASK_GATE:
4487 fTaskGate = true;
4488#ifndef IEM_IMPLEMENTS_TASKSWITCH
4489 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4490#endif
4491 break;
4492
4493 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4494 f32BitGate = false;
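            /* fall thru */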
4495 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4496 break;
4497
4498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4499 }
4500
4501 /* Check DPL against CPL if applicable. */
4502 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4503 {
4504 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4505 {
4506 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4507 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4508 }
4509 }
4510
4511 /* Is it there? */
4512 if (!Idte.Gate.u1Present)
4513 {
4514 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4515 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4516 }
4517
4518 /* Is it a task-gate? */
4519 if (fTaskGate)
4520 {
4521 /*
4522 * Construct the error code masks based on what caused this task switch.
4523 * See Intel Instruction reference for INT.
4524 */
4525 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4526 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4527 RTSEL SelTSS = Idte.Gate.u16Sel;
4528
4529 /*
4530 * Fetch the TSS descriptor in the GDT.
4531 */
4532 IEMSELDESC DescTSS;
4533 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4534 if (rcStrict != VINF_SUCCESS)
4535 {
4536 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4537 VBOXSTRICTRC_VAL(rcStrict)));
4538 return rcStrict;
4539 }
4540
4541 /* The TSS descriptor must be a system segment and be available (not busy). */
4542 if ( DescTSS.Legacy.Gen.u1DescType
4543 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4544 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4545 {
4546 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4547 u8Vector, SelTSS, DescTSS.Legacy.au64));
4548 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4549 }
4550
4551 /* The TSS must be present. */
4552 if (!DescTSS.Legacy.Gen.u1Present)
4553 {
4554 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4555 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4556 }
4557
4558 /* Do the actual task switch. */
4559 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4560 }
4561
4562 /* A null CS is bad. */
4563 RTSEL NewCS = Idte.Gate.u16Sel;
4564 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4565 {
4566 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4567 return iemRaiseGeneralProtectionFault0(pVCpu);
4568 }
4569
4570 /* Fetch the descriptor for the new CS. */
4571 IEMSELDESC DescCS;
4572 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4573 if (rcStrict != VINF_SUCCESS)
4574 {
4575 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4576 return rcStrict;
4577 }
4578
4579 /* Must be a code segment. */
4580 if (!DescCS.Legacy.Gen.u1DescType)
4581 {
4582 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4583 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4584 }
4585 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4586 {
4587 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4588 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4589 }
4590
4591 /* Don't allow lowering the privilege level. */
4592 /** @todo Does the lowering of privileges apply to software interrupts
4593 * only? This has bearings on the more-privileged or
4594 * same-privilege stack behavior further down. A testcase would
4595 * be nice. */
4596 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4597 {
4598 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4599 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4600 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4601 }
4602
4603 /* Make sure the selector is present. */
4604 if (!DescCS.Legacy.Gen.u1Present)
4605 {
4606 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4607 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4608 }
4609
4610 /* Check the new EIP against the new CS limit. */
4611 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4612 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4613 ? Idte.Gate.u16OffsetLow
4614 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4615 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4616 if (uNewEip > cbLimitCS)
4617 {
4618 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4619 u8Vector, uNewEip, cbLimitCS, NewCS));
4620 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4621 }
4622 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4623
4624 /* Calc the flag image to push. */
4625 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4626 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4627 fEfl &= ~X86_EFL_RF;
4628 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4629 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4630
4631 /* From V8086 mode only go to CPL 0. */
4632 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4633 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4634 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4635 {
4636 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4637 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4638 }
4639
4640 /*
4641 * If the privilege level changes, we need to get a new stack from the TSS.
4642 * This in turns means validating the new SS and ESP...
4643 */
4644 if (uNewCpl != pVCpu->iem.s.uCpl)
4645 {
4646 RTSEL NewSS;
4647 uint32_t uNewEsp;
4648 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4649 if (rcStrict != VINF_SUCCESS)
4650 return rcStrict;
4651
4652 IEMSELDESC DescSS;
4653 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4654 if (rcStrict != VINF_SUCCESS)
4655 return rcStrict;
4656 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4657 if (!DescSS.Legacy.Gen.u1DefBig)
4658 {
4659 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4660 uNewEsp = (uint16_t)uNewEsp;
4661 }
4662
4663 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4664
4665 /* Check that there is sufficient space for the stack frame. */
4666 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
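             /* The frame holds EIP, CS, EFLAGS plus the outer ESP and SS (and an optional
                error code); an interrupt taken from V8086 mode additionally pushes ES, DS,
                FS and GS.  Entries are 2 bytes wide for a 16-bit gate and 4 bytes for a
                32-bit one, hence the f32BitGate shift below. */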
4667 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4668 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4669 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4670
4671 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4672 {
4673 if ( uNewEsp - 1 > cbLimitSS
4674 || uNewEsp < cbStackFrame)
4675 {
4676 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4677 u8Vector, NewSS, uNewEsp, cbStackFrame));
4678 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4679 }
4680 }
4681 else
4682 {
4683 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4684 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4685 {
4686 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4687 u8Vector, NewSS, uNewEsp, cbStackFrame));
4688 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4689 }
4690 }
4691
4692 /*
4693 * Start making changes.
4694 */
4695
4696 /* Set the new CPL so that stack accesses use it. */
4697 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4698 pVCpu->iem.s.uCpl = uNewCpl;
4699
4700 /* Create the stack frame. */
4701 RTPTRUNION uStackFrame;
4702 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4703 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4704 if (rcStrict != VINF_SUCCESS)
4705 return rcStrict;
4706 void * const pvStackFrame = uStackFrame.pv;
4707 if (f32BitGate)
4708 {
4709 if (fFlags & IEM_XCPT_FLAGS_ERR)
4710 *uStackFrame.pu32++ = uErr;
4711 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4712 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4713 uStackFrame.pu32[2] = fEfl;
4714 uStackFrame.pu32[3] = pCtx->esp;
4715 uStackFrame.pu32[4] = pCtx->ss.Sel;
4716 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4717 if (fEfl & X86_EFL_VM)
4718 {
4719 uStackFrame.pu32[1] = pCtx->cs.Sel;
4720 uStackFrame.pu32[5] = pCtx->es.Sel;
4721 uStackFrame.pu32[6] = pCtx->ds.Sel;
4722 uStackFrame.pu32[7] = pCtx->fs.Sel;
4723 uStackFrame.pu32[8] = pCtx->gs.Sel;
4724 }
4725 }
4726 else
4727 {
4728 if (fFlags & IEM_XCPT_FLAGS_ERR)
4729 *uStackFrame.pu16++ = uErr;
4730 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4731 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4732 uStackFrame.pu16[2] = fEfl;
4733 uStackFrame.pu16[3] = pCtx->sp;
4734 uStackFrame.pu16[4] = pCtx->ss.Sel;
4735 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4736 if (fEfl & X86_EFL_VM)
4737 {
4738 uStackFrame.pu16[1] = pCtx->cs.Sel;
4739 uStackFrame.pu16[5] = pCtx->es.Sel;
4740 uStackFrame.pu16[6] = pCtx->ds.Sel;
4741 uStackFrame.pu16[7] = pCtx->fs.Sel;
4742 uStackFrame.pu16[8] = pCtx->gs.Sel;
4743 }
4744 }
4745 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4746 if (rcStrict != VINF_SUCCESS)
4747 return rcStrict;
4748
4749 /* Mark the selectors 'accessed' (hope this is the correct time). */
4750 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4751 * after pushing the stack frame? (Write protect the gdt + stack to
4752 * find out.) */
4753 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4754 {
4755 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4756 if (rcStrict != VINF_SUCCESS)
4757 return rcStrict;
4758 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4759 }
4760
4761 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4762 {
4763 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4764 if (rcStrict != VINF_SUCCESS)
4765 return rcStrict;
4766 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4767 }
4768
4769 /*
4770 * Start committing the register changes (joins with the DPL=CPL branch).
4771 */
4772 pCtx->ss.Sel = NewSS;
4773 pCtx->ss.ValidSel = NewSS;
4774 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4775 pCtx->ss.u32Limit = cbLimitSS;
4776 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4777 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4778 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4779 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4780 * SP is loaded).
4781 * Need to check the other combinations too:
4782 * - 16-bit TSS, 32-bit handler
4783 * - 32-bit TSS, 16-bit handler */
4784 if (!pCtx->ss.Attr.n.u1DefBig)
4785 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4786 else
4787 pCtx->rsp = uNewEsp - cbStackFrame;
4788
4789 if (fEfl & X86_EFL_VM)
4790 {
4791 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4792 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4793 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4794 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4795 }
4796 }
4797 /*
4798 * Same privilege, no stack change and smaller stack frame.
4799 */
4800 else
4801 {
4802 uint64_t uNewRsp;
4803 RTPTRUNION uStackFrame;
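        /* Only EIP, CS and EFLAGS (plus an optional error code) are pushed here,
           2 bytes per entry for a 16-bit gate and 4 bytes for a 32-bit one. */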
4804 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4805 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4806 if (rcStrict != VINF_SUCCESS)
4807 return rcStrict;
4808 void * const pvStackFrame = uStackFrame.pv;
4809
4810 if (f32BitGate)
4811 {
4812 if (fFlags & IEM_XCPT_FLAGS_ERR)
4813 *uStackFrame.pu32++ = uErr;
4814 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4815 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4816 uStackFrame.pu32[2] = fEfl;
4817 }
4818 else
4819 {
4820 if (fFlags & IEM_XCPT_FLAGS_ERR)
4821 *uStackFrame.pu16++ = uErr;
4822 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4823 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4824 uStackFrame.pu16[2] = fEfl;
4825 }
4826 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4827 if (rcStrict != VINF_SUCCESS)
4828 return rcStrict;
4829
4830 /* Mark the CS selector as 'accessed'. */
4831 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4832 {
4833 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4834 if (rcStrict != VINF_SUCCESS)
4835 return rcStrict;
4836 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4837 }
4838
4839 /*
4840 * Start committing the register changes (joins with the other branch).
4841 */
4842 pCtx->rsp = uNewRsp;
4843 }
4844
4845 /* ... register committing continues. */
4846 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4847 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4848 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4849 pCtx->cs.u32Limit = cbLimitCS;
4850 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4851 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4852
4853 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4854 fEfl &= ~fEflToClear;
4855 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4856
4857 if (fFlags & IEM_XCPT_FLAGS_CR2)
4858 pCtx->cr2 = uCr2;
4859
4860 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4861 iemRaiseXcptAdjustState(pCtx, u8Vector);
4862
4863 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4864}
4865
4866
4867/**
4868 * Implements exceptions and interrupts for long mode.
4869 *
4870 * @returns VBox strict status code.
4871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4872 * @param pCtx The CPU context.
4873 * @param cbInstr The number of bytes to offset rIP by in the return
4874 * address.
4875 * @param u8Vector The interrupt / exception vector number.
4876 * @param fFlags The flags.
4877 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4878 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4879 */
4880IEM_STATIC VBOXSTRICTRC
4881iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4882 PCPUMCTX pCtx,
4883 uint8_t cbInstr,
4884 uint8_t u8Vector,
4885 uint32_t fFlags,
4886 uint16_t uErr,
4887 uint64_t uCr2)
4888{
4889 /*
4890 * Read the IDT entry.
4891 */
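    /* Long mode IDT entries are 16 bytes each, hence the 'vector * 16' offset and
       the two 8-byte reads below. */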
4892 uint16_t offIdt = (uint16_t)u8Vector << 4;
4893 if (pCtx->idtr.cbIdt < offIdt + 7)
4894 {
4895 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4896 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4897 }
4898 X86DESC64 Idte;
4899 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4900 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4901 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4902 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4903 return rcStrict;
4904 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4905 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4906 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4907
4908 /*
4909 * Check the descriptor type, DPL and such.
4910 * ASSUMES this is done in the same order as described for call-gate calls.
4911 */
4912 if (Idte.Gate.u1DescType)
4913 {
4914 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4915 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4916 }
4917 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4918 switch (Idte.Gate.u4Type)
4919 {
4920 case AMD64_SEL_TYPE_SYS_INT_GATE:
4921 fEflToClear |= X86_EFL_IF;
4922 break;
4923 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4924 break;
4925
4926 default:
4927 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4928 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4929 }
4930
4931 /* Check DPL against CPL if applicable. */
4932 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4933 {
4934 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4935 {
4936 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4937 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4938 }
4939 }
4940
4941 /* Is it there? */
4942 if (!Idte.Gate.u1Present)
4943 {
4944 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4945 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4946 }
4947
4948 /* A null CS is bad. */
4949 RTSEL NewCS = Idte.Gate.u16Sel;
4950 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4951 {
4952 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4953 return iemRaiseGeneralProtectionFault0(pVCpu);
4954 }
4955
4956 /* Fetch the descriptor for the new CS. */
4957 IEMSELDESC DescCS;
4958 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4959 if (rcStrict != VINF_SUCCESS)
4960 {
4961 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4962 return rcStrict;
4963 }
4964
4965 /* Must be a 64-bit code segment. */
4966 if (!DescCS.Long.Gen.u1DescType)
4967 {
4968 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4969 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4970 }
4971 if ( !DescCS.Long.Gen.u1Long
4972 || DescCS.Long.Gen.u1DefBig
4973 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4974 {
4975 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4976 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4977 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4978 }
4979
4980 /* Don't allow lowering the privilege level. For non-conforming CS
4981 selectors, the CS.DPL sets the privilege level the trap/interrupt
4982 handler runs at. For conforming CS selectors, the CPL remains
4983 unchanged, but the CS.DPL must be <= CPL. */
4984 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4985 * when CPU in Ring-0. Result \#GP? */
4986 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4987 {
4988 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4989 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4990 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4991 }
4992
4993
4994 /* Make sure the selector is present. */
4995 if (!DescCS.Legacy.Gen.u1Present)
4996 {
4997 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4998 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4999 }
5000
5001 /* Check that the new RIP is canonical. */
5002 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5003 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5004 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5005 if (!IEM_IS_CANONICAL(uNewRip))
5006 {
5007 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5008 return iemRaiseGeneralProtectionFault0(pVCpu);
5009 }
5010
5011 /*
5012 * If the privilege level changes or if the IST isn't zero, we need to get
5013 * a new stack from the TSS.
5014 */
5015 uint64_t uNewRsp;
5016 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5017 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5018 if ( uNewCpl != pVCpu->iem.s.uCpl
5019 || Idte.Gate.u3IST != 0)
5020 {
5021 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5022 if (rcStrict != VINF_SUCCESS)
5023 return rcStrict;
5024 }
5025 else
5026 uNewRsp = pCtx->rsp;
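    /* The CPU aligns RSP on a 16-byte boundary before pushing the long mode frame. */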
5027 uNewRsp &= ~(uint64_t)0xf;
5028
5029 /*
5030 * Calc the flag image to push.
5031 */
5032 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5033 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5034 fEfl &= ~X86_EFL_RF;
5035 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5036 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5037
5038 /*
5039 * Start making changes.
5040 */
5041 /* Set the new CPL so that stack accesses use it. */
5042 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5043 pVCpu->iem.s.uCpl = uNewCpl;
5044
5045 /* Create the stack frame. */
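    /* The long mode frame is always SS, RSP, RFLAGS, CS and RIP (5 qwords), plus an
       optional error code qword. */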
5046 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5047 RTPTRUNION uStackFrame;
5048 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5049 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5050 if (rcStrict != VINF_SUCCESS)
5051 return rcStrict;
5052 void * const pvStackFrame = uStackFrame.pv;
5053
5054 if (fFlags & IEM_XCPT_FLAGS_ERR)
5055 *uStackFrame.pu64++ = uErr;
5056 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5057 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5058 uStackFrame.pu64[2] = fEfl;
5059 uStackFrame.pu64[3] = pCtx->rsp;
5060 uStackFrame.pu64[4] = pCtx->ss.Sel;
5061 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5062 if (rcStrict != VINF_SUCCESS)
5063 return rcStrict;
5064
5065 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5066 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5067 * after pushing the stack frame? (Write protect the gdt + stack to
5068 * find out.) */
5069 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5070 {
5071 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5072 if (rcStrict != VINF_SUCCESS)
5073 return rcStrict;
5074 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5075 }
5076
5077 /*
5078 * Start committing the register changes.
5079 */
5080 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5081 * hidden registers when interrupting 32-bit or 16-bit code! */
5082 if (uNewCpl != uOldCpl)
5083 {
5084 pCtx->ss.Sel = 0 | uNewCpl;
5085 pCtx->ss.ValidSel = 0 | uNewCpl;
5086 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5087 pCtx->ss.u32Limit = UINT32_MAX;
5088 pCtx->ss.u64Base = 0;
5089 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5090 }
5091 pCtx->rsp = uNewRsp - cbStackFrame;
5092 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5093 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5094 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5095 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5096 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5097 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5098 pCtx->rip = uNewRip;
5099
5100 fEfl &= ~fEflToClear;
5101 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5102
5103 if (fFlags & IEM_XCPT_FLAGS_CR2)
5104 pCtx->cr2 = uCr2;
5105
5106 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5107 iemRaiseXcptAdjustState(pCtx, u8Vector);
5108
5109 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5110}
5111
5112
5113/**
5114 * Implements exceptions and interrupts.
5115 *
5116 * All exceptions and interrupts go through this function!
5117 *
5118 * @returns VBox strict status code.
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param cbInstr The number of bytes to offset rIP by in the return
5121 * address.
5122 * @param u8Vector The interrupt / exception vector number.
5123 * @param fFlags The flags.
5124 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5125 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5126 */
5127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5128iemRaiseXcptOrInt(PVMCPU pVCpu,
5129 uint8_t cbInstr,
5130 uint8_t u8Vector,
5131 uint32_t fFlags,
5132 uint16_t uErr,
5133 uint64_t uCr2)
5134{
5135 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5136#ifdef IN_RING0
5137 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5138 AssertRCReturn(rc, rc);
5139#endif
5140
5141#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5142 /*
5143 * Flush prefetch buffer
5144 */
5145 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5146#endif
5147
5148 /*
5149 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5150 */
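    /* A software INT n in V8086 mode with IOPL < 3 is converted to #GP(0) here rather
       than dispatched via the IDT (the IEM_XCPT_FLAGS_BP_INSTR check exempts INT3). */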
5151 if ( pCtx->eflags.Bits.u1VM
5152 && pCtx->eflags.Bits.u2IOPL != 3
5153 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5154 && (pCtx->cr0 & X86_CR0_PE) )
5155 {
5156 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5157 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5158 u8Vector = X86_XCPT_GP;
5159 uErr = 0;
5160 }
5161#ifdef DBGFTRACE_ENABLED
5162 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5163 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5164 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5165#endif
5166
5167 /*
5168 * Do recursion accounting.
5169 */
5170 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5171 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5172 if (pVCpu->iem.s.cXcptRecursions == 0)
5173 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5174 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5175 else
5176 {
5177 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5178 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5179
5180 /** @todo double and triple faults. */
5181 if (pVCpu->iem.s.cXcptRecursions >= 3)
5182 {
5183#ifdef DEBUG_bird
5184 AssertFailed();
5185#endif
5186 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5187 }
5188
5189 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5190 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5191 {
5192 ....
5193 } */
5194 }
5195 pVCpu->iem.s.cXcptRecursions++;
5196 pVCpu->iem.s.uCurXcpt = u8Vector;
5197 pVCpu->iem.s.fCurXcpt = fFlags;
5198
5199 /*
5200 * Extensive logging.
5201 */
5202#if defined(LOG_ENABLED) && defined(IN_RING3)
5203 if (LogIs3Enabled())
5204 {
5205 PVM pVM = pVCpu->CTX_SUFF(pVM);
5206 char szRegs[4096];
5207 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5208 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5209 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5210 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5211 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5212 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5213 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5214 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5215 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5216 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5217 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5218 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5219 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5220 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5221 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5222 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5223 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5224 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5225 " efer=%016VR{efer}\n"
5226 " pat=%016VR{pat}\n"
5227 " sf_mask=%016VR{sf_mask}\n"
5228 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5229 " lstar=%016VR{lstar}\n"
5230 " star=%016VR{star} cstar=%016VR{cstar}\n"
5231 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5232 );
5233
5234 char szInstr[256];
5235 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5236 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5237 szInstr, sizeof(szInstr), NULL);
5238 Log3(("%s%s\n", szRegs, szInstr));
5239 }
5240#endif /* LOG_ENABLED */
5241
5242 /*
5243 * Call the mode specific worker function.
5244 */
5245 VBOXSTRICTRC rcStrict;
5246 if (!(pCtx->cr0 & X86_CR0_PE))
5247 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5248 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5249 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5250 else
5251 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5252
5253 /* Flush the prefetch buffer. */
5254#ifdef IEM_WITH_CODE_TLB
5255 pVCpu->iem.s.pbInstrBuf = NULL;
5256#else
5257 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5258#endif
5259
5260 /*
5261 * Unwind.
5262 */
5263 pVCpu->iem.s.cXcptRecursions--;
5264 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5265 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5266 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5267 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5268 return rcStrict;
5269}
5270
5271#ifdef IEM_WITH_SETJMP
5272/**
5273 * See iemRaiseXcptOrInt. Will not return.
5274 */
5275IEM_STATIC DECL_NO_RETURN(void)
5276iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5277 uint8_t cbInstr,
5278 uint8_t u8Vector,
5279 uint32_t fFlags,
5280 uint16_t uErr,
5281 uint64_t uCr2)
5282{
5283 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5284 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5285}
5286#endif
5287
5288
5289/** \#DE - 00. */
5290DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5291{
5292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5293}
5294
5295
5296/** \#DB - 01.
5297 * @note This automatically clears DR7.GD. */
5298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5299{
5300 /** @todo set/clear RF. */
5301 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5302 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5303}
5304
5305
5306/** \#BR - 05. */
5307DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5308{
5309 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5310}
5311
5312
5313/** \#UD - 06. */
5314DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5315{
5316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5317}
5318
5319
5320/** \#NM - 07. */
5321DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5322{
5323 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5324}
5325
5326
5327/** \#TS(err) - 0a. */
5328DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5329{
5330 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5331}
5332
5333
5334/** \#TS(tr) - 0a. */
5335DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5336{
5337 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5338 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5339}
5340
5341
5342/** \#TS(0) - 0a. */
5343DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5344{
5345 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5346 0, 0);
5347}
5348
5349
5350 /** \#TS(sel) - 0a. */
5351DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5352{
5353 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5354 uSel & X86_SEL_MASK_OFF_RPL, 0);
5355}
5356
5357
5358/** \#NP(err) - 0b. */
5359DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5360{
5361 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5362}
5363
5364
5365/** \#NP(sel) - 0b. */
5366DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5367{
5368 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5369 uSel & ~X86_SEL_RPL, 0);
5370}
5371
5372
5373/** \#SS(seg) - 0c. */
5374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5375{
5376 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5377 uSel & ~X86_SEL_RPL, 0);
5378}
5379
5380
5381/** \#SS(err) - 0c. */
5382DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5383{
5384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5385}
5386
5387
5388/** \#GP(n) - 0d. */
5389DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5390{
5391 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5392}
5393
5394
5395/** \#GP(0) - 0d. */
5396DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5397{
5398 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5399}
5400
5401#ifdef IEM_WITH_SETJMP
5402 /** \#GP(0) - 0d, longjmp. */
5403DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5404{
5405 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5406}
5407#endif
5408
5409
5410/** \#GP(sel) - 0d. */
5411DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5412{
5413 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5414 Sel & ~X86_SEL_RPL, 0);
5415}
5416
5417
5418/** \#GP(0) - 0d. */
5419DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5420{
5421 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5422}
5423
5424
5425/** \#GP(sel) - 0d. */
5426DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5427{
5428 NOREF(iSegReg); NOREF(fAccess);
5429 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5430 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5431}
5432
5433#ifdef IEM_WITH_SETJMP
5434/** \#GP(sel) - 0d, longjmp. */
5435DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5436{
5437 NOREF(iSegReg); NOREF(fAccess);
5438 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5439 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5440}
5441#endif
5442
5443/** \#GP(sel) - 0d. */
5444DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5445{
5446 NOREF(Sel);
5447 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5448}
5449
5450#ifdef IEM_WITH_SETJMP
5451/** \#GP(sel) - 0d, longjmp. */
5452DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5453{
5454 NOREF(Sel);
5455 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5456}
5457#endif
5458
5459
5460/** \#GP(sel) - 0d. */
5461DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5462{
5463 NOREF(iSegReg); NOREF(fAccess);
5464 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5465}
5466
5467#ifdef IEM_WITH_SETJMP
5468/** \#GP(sel) - 0d, longjmp. */
5469DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5470 uint32_t fAccess)
5471{
5472 NOREF(iSegReg); NOREF(fAccess);
5473 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5474}
5475#endif
5476
5477
5478/** \#PF(n) - 0e. */
5479DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5480{
5481 uint16_t uErr;
5482 switch (rc)
5483 {
5484 case VERR_PAGE_NOT_PRESENT:
5485 case VERR_PAGE_TABLE_NOT_PRESENT:
5486 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5487 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5488 uErr = 0;
5489 break;
5490
5491 default:
5492 AssertMsgFailed(("%Rrc\n", rc));
5493 /* fall thru */
5494 case VERR_ACCESS_DENIED:
5495 uErr = X86_TRAP_PF_P;
5496 break;
5497
5498 /** @todo reserved */
5499 }
5500
5501 if (pVCpu->iem.s.uCpl == 3)
5502 uErr |= X86_TRAP_PF_US;
5503
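    /* Only report the instruction-fetch (I/D) bit when NX paging is in effect, i.e.
       both CR4.PAE and EFER.NXE are set. */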
5504 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5505 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5506 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5507 uErr |= X86_TRAP_PF_ID;
5508
5509 #if 0 /* This is so much nonsense, really. Why was it done like that? */
5510 /* Note! RW access callers reporting a WRITE protection fault, will clear
5511 the READ flag before calling. So, read-modify-write accesses (RW)
5512 can safely be reported as READ faults. */
5513 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5514 uErr |= X86_TRAP_PF_RW;
5515#else
5516 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5517 {
5518 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5519 uErr |= X86_TRAP_PF_RW;
5520 }
5521#endif
5522
5523 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5524 uErr, GCPtrWhere);
5525}
5526
5527#ifdef IEM_WITH_SETJMP
5528/** \#PF(n) - 0e, longjmp. */
5529IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5530{
5531 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5532}
5533#endif
5534
5535
5536/** \#MF(0) - 10. */
5537DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5538{
5539 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5540}
5541
5542
5543/** \#AC(0) - 11. */
5544DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5545{
5546 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5547}
5548
5549
5550/**
5551 * Macro for calling iemCImplRaiseDivideError().
5552 *
5553 * This enables us to add/remove arguments and force different levels of
5554 * inlining as we wish.
5555 *
5556 * @return Strict VBox status code.
5557 */
5558#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5559IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5560{
5561 NOREF(cbInstr);
5562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5563}
5564
5565
5566/**
5567 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5568 *
5569 * This enables us to add/remove arguments and force different levels of
5570 * inlining as we wish.
5571 *
5572 * @return Strict VBox status code.
5573 */
5574#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5575IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5576{
5577 NOREF(cbInstr);
5578 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5579}
5580
5581
5582/**
5583 * Macro for calling iemCImplRaiseInvalidOpcode().
5584 *
5585 * This enables us to add/remove arguments and force different levels of
5586 * inlining as we wish.
5587 *
5588 * @return Strict VBox status code.
5589 */
5590#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
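/* Typical use in an opcode decoder is simply 'return IEMOP_RAISE_INVALID_OPCODE();',
   see for instance the FNIEMOP_UD_STUB macros further down. */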
5591IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5592{
5593 NOREF(cbInstr);
5594 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5595}
5596
5597
5598/** @} */
5599
5600
5601/*
5602 *
5603 * Helper routines.
5604 * Helper routines.
5605 * Helper routines.
5606 *
5607 */
5608
5609/**
5610 * Recalculates the effective operand size.
5611 *
5612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5613 */
5614IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5615{
5616 switch (pVCpu->iem.s.enmCpuMode)
5617 {
5618 case IEMMODE_16BIT:
5619 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5620 break;
5621 case IEMMODE_32BIT:
5622 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5623 break;
5624 case IEMMODE_64BIT:
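            /* REX.W takes precedence over the 0x66 operand size prefix; 0x66 alone
               selects 16-bit, otherwise the default operand size applies. */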
5625 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5626 {
5627 case 0:
5628 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5629 break;
5630 case IEM_OP_PRF_SIZE_OP:
5631 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5632 break;
5633 case IEM_OP_PRF_SIZE_REX_W:
5634 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5635 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5636 break;
5637 }
5638 break;
5639 default:
5640 AssertFailed();
5641 }
5642}
5643
5644
5645/**
5646 * Sets the default operand size to 64-bit and recalculates the effective
5647 * operand size.
5648 *
5649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5650 */
5651IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5652{
5653 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5654 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5655 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5656 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5657 else
5658 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5659}
5660
5661
5662/*
5663 *
5664 * Common opcode decoders.
5665 * Common opcode decoders.
5666 * Common opcode decoders.
5667 *
5668 */
5669//#include <iprt/mem.h>
5670
5671/**
5672 * Used to add extra details about a stub case.
5673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5674 */
5675IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5676{
5677#if defined(LOG_ENABLED) && defined(IN_RING3)
5678 PVM pVM = pVCpu->CTX_SUFF(pVM);
5679 char szRegs[4096];
5680 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5681 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5682 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5683 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5684 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5685 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5686 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5687 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5688 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5689 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5690 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5691 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5692 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5693 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5694 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5695 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5696 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5697 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5698 " efer=%016VR{efer}\n"
5699 " pat=%016VR{pat}\n"
5700 " sf_mask=%016VR{sf_mask}\n"
5701 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5702 " lstar=%016VR{lstar}\n"
5703 " star=%016VR{star} cstar=%016VR{cstar}\n"
5704 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5705 );
5706
5707 char szInstr[256];
5708 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5709 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5710 szInstr, sizeof(szInstr), NULL);
5711
5712 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5713#else
5714 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5715#endif
5716}
5717
5718/**
5719 * Complains about a stub.
5720 *
5721 * Providing two versions of this macro, one for daily use and one for use when
5722 * working on IEM.
5723 */
5724#if 0
5725# define IEMOP_BITCH_ABOUT_STUB() \
5726 do { \
5727 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5728 iemOpStubMsg2(pVCpu); \
5729 RTAssertPanic(); \
5730 } while (0)
5731#else
5732# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5733#endif
5734
5735/** Stubs an opcode. */
5736#define FNIEMOP_STUB(a_Name) \
5737 FNIEMOP_DEF(a_Name) \
5738 { \
5739 RT_NOREF_PV(pVCpu); \
5740 IEMOP_BITCH_ABOUT_STUB(); \
5741 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5742 } \
5743 typedef int ignore_semicolon
5744
5745/** Stubs an opcode. */
5746#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5747 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5748 { \
5749 RT_NOREF_PV(pVCpu); \
5750 RT_NOREF_PV(a_Name0); \
5751 IEMOP_BITCH_ABOUT_STUB(); \
5752 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5753 } \
5754 typedef int ignore_semicolon
5755
5756/** Stubs an opcode which currently should raise \#UD. */
5757#define FNIEMOP_UD_STUB(a_Name) \
5758 FNIEMOP_DEF(a_Name) \
5759 { \
5760 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5761 return IEMOP_RAISE_INVALID_OPCODE(); \
5762 } \
5763 typedef int ignore_semicolon
5764
5765/** Stubs an opcode which currently should raise \#UD. */
5766#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5767 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5768 { \
5769 RT_NOREF_PV(pVCpu); \
5770 RT_NOREF_PV(a_Name0); \
5771 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5772 return IEMOP_RAISE_INVALID_OPCODE(); \
5773 } \
5774 typedef int ignore_semicolon
5775
5776
5777
5778/** @name Register Access.
5779 * @{
5780 */
5781
5782/**
5783 * Gets a reference (pointer) to the specified hidden segment register.
5784 *
5785 * @returns Hidden register reference.
5786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5787 * @param iSegReg The segment register.
5788 */
5789IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5790{
5791 Assert(iSegReg < X86_SREG_COUNT);
5792 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5793 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5794
5795#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5796 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5797 { /* likely */ }
5798 else
5799 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5800#else
5801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5802#endif
5803 return pSReg;
5804}
5805
5806
5807/**
5808 * Ensures that the given hidden segment register is up to date.
5809 *
5810 * @returns Hidden register reference.
5811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5812 * @param pSReg The segment register.
5813 */
5814IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5815{
5816#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5817 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5818 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5819#else
5820 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5821 NOREF(pVCpu);
5822#endif
5823 return pSReg;
5824}
5825
5826
5827/**
5828 * Gets a reference (pointer) to the specified segment register (the selector
5829 * value).
5830 *
5831 * @returns Pointer to the selector variable.
5832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5833 * @param iSegReg The segment register.
5834 */
5835DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5836{
5837 Assert(iSegReg < X86_SREG_COUNT);
5838 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5839 return &pCtx->aSRegs[iSegReg].Sel;
5840}
5841
5842
5843/**
5844 * Fetches the selector value of a segment register.
5845 *
5846 * @returns The selector value.
5847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5848 * @param iSegReg The segment register.
5849 */
5850DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5851{
5852 Assert(iSegReg < X86_SREG_COUNT);
5853 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5854}
5855
5856
5857/**
5858 * Gets a reference (pointer) to the specified general purpose register.
5859 *
5860 * @returns Register reference.
5861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5862 * @param iReg The general purpose register.
5863 */
5864DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5865{
5866 Assert(iReg < 16);
5867 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5868 return &pCtx->aGRegs[iReg];
5869}
5870
5871
5872/**
5873 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5874 *
5875 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5876 *
5877 * @returns Register reference.
5878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5879 * @param iReg The register.
5880 */
5881DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5882{
5883 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5884 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5885 {
5886 Assert(iReg < 16);
5887 return &pCtx->aGRegs[iReg].u8;
5888 }
5889 /* high 8-bit register. */
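    /* Without a REX prefix, indexes 4 thru 7 select AH, CH, DH and BH, i.e. bits 8-15
       of the first four GPRs - hence the 'iReg & 3' and '.bHi' below. */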
5890 Assert(iReg < 8);
5891 return &pCtx->aGRegs[iReg & 3].bHi;
5892}
5893
5894
5895/**
5896 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5897 *
5898 * @returns Register reference.
5899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5900 * @param iReg The register.
5901 */
5902DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5903{
5904 Assert(iReg < 16);
5905 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5906 return &pCtx->aGRegs[iReg].u16;
5907}
5908
5909
5910/**
5911 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5912 *
5913 * @returns Register reference.
5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5915 * @param iReg The register.
5916 */
5917DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5918{
5919 Assert(iReg < 16);
5920 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5921 return &pCtx->aGRegs[iReg].u32;
5922}
5923
5924
5925/**
5926 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5927 *
5928 * @returns Register reference.
5929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5930 * @param iReg The register.
5931 */
5932DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5933{
5934 Assert(iReg < 16);
5935 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5936 return &pCtx->aGRegs[iReg].u64;
5937}
5938
5939
5940/**
5941 * Fetches the value of an 8-bit general purpose register.
5942 *
5943 * @returns The register value.
5944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5945 * @param iReg The register.
5946 */
5947DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5948{
5949 return *iemGRegRefU8(pVCpu, iReg);
5950}
5951
5952
5953/**
5954 * Fetches the value of a 16-bit general purpose register.
5955 *
5956 * @returns The register value.
5957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5958 * @param iReg The register.
5959 */
5960DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5961{
5962 Assert(iReg < 16);
5963 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5964}
5965
5966
5967/**
5968 * Fetches the value of a 32-bit general purpose register.
5969 *
5970 * @returns The register value.
5971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5972 * @param iReg The register.
5973 */
5974DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5975{
5976 Assert(iReg < 16);
5977 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5978}
5979
5980
5981/**
5982 * Fetches the value of a 64-bit general purpose register.
5983 *
5984 * @returns The register value.
5985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5986 * @param iReg The register.
5987 */
5988DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5989{
5990 Assert(iReg < 16);
5991 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5992}
5993
5994
5995/**
5996 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5997 *
5998 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5999 * segment limit.
6000 *
6001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6002 * @param offNextInstr The offset of the next instruction.
6003 */
6004IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6005{
6006 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
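    /* Note: the offset is relative to the end of the instruction, hence the
       IEM_GET_INSTR_LEN() additions below. */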
6007 switch (pVCpu->iem.s.enmEffOpSize)
6008 {
6009 case IEMMODE_16BIT:
6010 {
6011 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6012 if ( uNewIp > pCtx->cs.u32Limit
6013 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6014 return iemRaiseGeneralProtectionFault0(pVCpu);
6015 pCtx->rip = uNewIp;
6016 break;
6017 }
6018
6019 case IEMMODE_32BIT:
6020 {
6021 Assert(pCtx->rip <= UINT32_MAX);
6022 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6023
6024 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6025 if (uNewEip > pCtx->cs.u32Limit)
6026 return iemRaiseGeneralProtectionFault0(pVCpu);
6027 pCtx->rip = uNewEip;
6028 break;
6029 }
6030
6031 case IEMMODE_64BIT:
6032 {
6033 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6034
6035 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6036 if (!IEM_IS_CANONICAL(uNewRip))
6037 return iemRaiseGeneralProtectionFault0(pVCpu);
6038 pCtx->rip = uNewRip;
6039 break;
6040 }
6041
6042 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6043 }
6044
6045 pCtx->eflags.Bits.u1RF = 0;
6046
6047#ifndef IEM_WITH_CODE_TLB
6048 /* Flush the prefetch buffer. */
6049 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6050#endif
6051
6052 return VINF_SUCCESS;
6053}
6054
6055
6056/**
6057 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6058 *
6059 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6060 * segment limit.
6061 *
6062 * @returns Strict VBox status code.
6063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6064 * @param offNextInstr The offset of the next instruction.
6065 */
6066IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6067{
6068 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6069 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6070
6071 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6072 if ( uNewIp > pCtx->cs.u32Limit
6073 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6074 return iemRaiseGeneralProtectionFault0(pVCpu);
6075 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6076 pCtx->rip = uNewIp;
6077 pCtx->eflags.Bits.u1RF = 0;
6078
6079#ifndef IEM_WITH_CODE_TLB
6080 /* Flush the prefetch buffer. */
6081 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6082#endif
6083
6084 return VINF_SUCCESS;
6085}
6086
6087
6088/**
6089 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6090 *
6091 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6092 * segment limit.
6093 *
6094 * @returns Strict VBox status code.
6095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6096 * @param offNextInstr The offset of the next instruction.
6097 */
6098IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6099{
6100 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6101 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6102
6103 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6104 {
6105 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6106
6107 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6108 if (uNewEip > pCtx->cs.u32Limit)
6109 return iemRaiseGeneralProtectionFault0(pVCpu);
6110 pCtx->rip = uNewEip;
6111 }
6112 else
6113 {
6114 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6115
6116 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6117 if (!IEM_IS_CANONICAL(uNewRip))
6118 return iemRaiseGeneralProtectionFault0(pVCpu);
6119 pCtx->rip = uNewRip;
6120 }
6121 pCtx->eflags.Bits.u1RF = 0;
6122
6123#ifndef IEM_WITH_CODE_TLB
6124 /* Flush the prefetch buffer. */
6125 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6126#endif
6127
6128 return VINF_SUCCESS;
6129}
6130
6131
6132/**
6133 * Performs a near jump to the specified address.
6134 *
6135 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6136 * segment limit.
6137 *
6138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6139 * @param uNewRip The new RIP value.
6140 */
6141IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6142{
6143 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6144 switch (pVCpu->iem.s.enmEffOpSize)
6145 {
6146 case IEMMODE_16BIT:
6147 {
6148 Assert(uNewRip <= UINT16_MAX);
6149 if ( uNewRip > pCtx->cs.u32Limit
6150 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6151 return iemRaiseGeneralProtectionFault0(pVCpu);
6152 /** @todo Test 16-bit jump in 64-bit mode. */
6153 pCtx->rip = uNewRip;
6154 break;
6155 }
6156
6157 case IEMMODE_32BIT:
6158 {
6159 Assert(uNewRip <= UINT32_MAX);
6160 Assert(pCtx->rip <= UINT32_MAX);
6161 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6162
6163 if (uNewRip > pCtx->cs.u32Limit)
6164 return iemRaiseGeneralProtectionFault0(pVCpu);
6165 pCtx->rip = uNewRip;
6166 break;
6167 }
6168
6169 case IEMMODE_64BIT:
6170 {
6171 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6172
6173 if (!IEM_IS_CANONICAL(uNewRip))
6174 return iemRaiseGeneralProtectionFault0(pVCpu);
6175 pCtx->rip = uNewRip;
6176 break;
6177 }
6178
6179 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6180 }
6181
6182 pCtx->eflags.Bits.u1RF = 0;
6183
6184#ifndef IEM_WITH_CODE_TLB
6185 /* Flush the prefetch buffer. */
6186 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6187#endif
6188
6189 return VINF_SUCCESS;
6190}
6191
6192
6193/**
6194 * Get the address of the top of the stack.
6195 *
6196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6197 * @param pCtx The CPU context whose SP/ESP/RSP should be
6198 * read.
6199 */
6200DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6201{
6202 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6203 return pCtx->rsp;
6204 if (pCtx->ss.Attr.n.u1DefBig)
6205 return pCtx->esp;
6206 return pCtx->sp;
6207}
6208
6209
6210/**
6211 * Updates the RIP/EIP/IP to point to the next instruction.
6212 *
6213 * This function leaves the EFLAGS.RF flag alone.
6214 *
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param cbInstr The number of bytes to add.
6217 */
6218IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6219{
6220 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6221 switch (pVCpu->iem.s.enmCpuMode)
6222 {
6223 case IEMMODE_16BIT:
6224 Assert(pCtx->rip <= UINT16_MAX);
6225 pCtx->eip += cbInstr;
6226 pCtx->eip &= UINT32_C(0xffff);
6227 break;
6228
6229 case IEMMODE_32BIT:
6230 pCtx->eip += cbInstr;
6231 Assert(pCtx->rip <= UINT32_MAX);
6232 break;
6233
6234 case IEMMODE_64BIT:
6235 pCtx->rip += cbInstr;
6236 break;
6237 default: AssertFailed();
6238 }
6239}
6240
6241
6242#if 0
6243/**
6244 * Updates the RIP/EIP/IP to point to the next instruction.
6245 *
6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6247 */
6248IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6249{
6250 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6251}
6252#endif
6253
6254
6255
6256/**
6257 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6258 *
6259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6260 * @param cbInstr The number of bytes to add.
6261 */
6262IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6263{
6264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6265
6266 pCtx->eflags.Bits.u1RF = 0;
6267
6268 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6269#if ARCH_BITS >= 64
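    /* Mask table indexed by CPU mode: wrap IP at 16 bits, EIP at 32 bits and leave
       RIP untouched. */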
6270 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6271 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6272 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6273#else
6274 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6275 pCtx->rip += cbInstr;
6276 else
6277 {
6278 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6279 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6280 }
6281#endif
6282}
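
/*
 * A standalone sketch of the mode-indexed masking used above (helper name and
 * plain C99 types are illustrative only):
 *
 *      #include <stdint.h>
 *      // iMode: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit, mirroring the IEMMODE_XXX order.
 *      static uint64_t advanceRip(unsigned iMode, uint64_t uRip, uint8_t cbInstr)
 *      {
 *          static uint64_t const s_aMasks[3] =
 *          { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
 *          return (uRip + cbInstr) & s_aMasks[iMode];
 *      }
 *      // Example: advanceRip(0, 0xffff, 2) == 0x0001 -- the 16-bit IP wraps around.
 */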
6283
6284
6285/**
6286 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6287 *
6288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6289 */
6290IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6291{
6292 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6293}
6294
6295
6296/**
6297 * Adds to the stack pointer.
6298 *
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 * @param pCtx The CPU context whose SP/ESP/RSP should be
6301 * updated.
6302 * @param cbToAdd The number of bytes to add (8-bit!).
6303 */
6304DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6305{
6306 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6307 pCtx->rsp += cbToAdd;
6308 else if (pCtx->ss.Attr.n.u1DefBig)
6309 pCtx->esp += cbToAdd;
6310 else
6311 pCtx->sp += cbToAdd;
6312}
6313
6314
6315/**
6316 * Subtracts from the stack pointer.
6317 *
6318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6319 * @param pCtx The CPU context whose SP/ESP/RSP should be
6320 * updated.
6321 * @param cbToSub The number of bytes to subtract (8-bit!).
6322 */
6323DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6324{
6325 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6326 pCtx->rsp -= cbToSub;
6327 else if (pCtx->ss.Attr.n.u1DefBig)
6328 pCtx->esp -= cbToSub;
6329 else
6330 pCtx->sp -= cbToSub;
6331}
6332
6333
6334/**
6335 * Adds to the temporary stack pointer.
6336 *
6337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6338 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6339 * @param cbToAdd The number of bytes to add (16-bit).
6340 * @param pCtx Where to get the current stack mode.
6341 */
6342DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6343{
6344 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6345 pTmpRsp->u += cbToAdd;
6346 else if (pCtx->ss.Attr.n.u1DefBig)
6347 pTmpRsp->DWords.dw0 += cbToAdd;
6348 else
6349 pTmpRsp->Words.w0 += cbToAdd;
6350}
6351
6352
6353/**
6354 * Subtracts from the temporary stack pointer.
6355 *
6356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6357 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6358 * @param cbToSub The number of bytes to subtract.
6359 * @param pCtx Where to get the current stack mode.
6360 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6361 * expecting that.
6362 */
6363DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6364{
6365 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6366 pTmpRsp->u -= cbToSub;
6367 else if (pCtx->ss.Attr.n.u1DefBig)
6368 pTmpRsp->DWords.dw0 -= cbToSub;
6369 else
6370 pTmpRsp->Words.w0 -= cbToSub;
6371}
6372
6373
6374/**
6375 * Calculates the effective stack address for a push of the specified size as
6376 * well as the new RSP value (upper bits may be masked).
6377 *
6378 * @returns Effective stack address for the push.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param pCtx Where to get the current stack mode.
6381 * @param cbItem The size of the stack item to push.
6382 * @param puNewRsp Where to return the new RSP value.
6383 */
6384DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6385{
6386 RTUINT64U uTmpRsp;
6387 RTGCPTR GCPtrTop;
6388 uTmpRsp.u = pCtx->rsp;
6389
6390 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6391 GCPtrTop = uTmpRsp.u -= cbItem;
6392 else if (pCtx->ss.Attr.n.u1DefBig)
6393 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6394 else
6395 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6396 *puNewRsp = uTmpRsp.u;
6397 return GCPtrTop;
6398}
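
/*
 * Worked example (illustrative, not from the original source): for a 16-bit
 * stack only the low word of RSP moves and the upper bits are preserved,
 * exactly like the Words.w0 update above.
 *
 *      #include <stdint.h>
 *      static uint64_t pushRsp16(uint64_t uRsp, uint8_t cbItem, uint16_t *puNewSp)
 *      {
 *          uint16_t uSp = (uint16_t)((uint16_t)uRsp - cbItem);   // wraps modulo 64K
 *          *puNewSp = uSp;
 *          return (uRsp & UINT64_C(0xffffffffffff0000)) | uSp;
 *      }
 *      // With RSP=0x00010000 and a 2 byte push: new SP=0xfffe, RSP becomes 0x0001fffe.
 */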
6399
6400
6401/**
6402 * Gets the current stack pointer and calculates the value after a pop of the
6403 * specified size.
6404 *
6405 * @returns Current stack pointer.
6406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6407 * @param pCtx Where to get the current stack mode.
6408 * @param cbItem The size of the stack item to pop.
6409 * @param puNewRsp Where to return the new RSP value.
6410 */
6411DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6412{
6413 RTUINT64U uTmpRsp;
6414 RTGCPTR GCPtrTop;
6415 uTmpRsp.u = pCtx->rsp;
6416
6417 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6418 {
6419 GCPtrTop = uTmpRsp.u;
6420 uTmpRsp.u += cbItem;
6421 }
6422 else if (pCtx->ss.Attr.n.u1DefBig)
6423 {
6424 GCPtrTop = uTmpRsp.DWords.dw0;
6425 uTmpRsp.DWords.dw0 += cbItem;
6426 }
6427 else
6428 {
6429 GCPtrTop = uTmpRsp.Words.w0;
6430 uTmpRsp.Words.w0 += cbItem;
6431 }
6432 *puNewRsp = uTmpRsp.u;
6433 return GCPtrTop;
6434}
6435
6436
6437/**
6438 * Calculates the effective stack address for a push of the specified size as
6439 * well as the new temporary RSP value (upper bits may be masked).
6440 *
6441 * @returns Effective stack address for the push.
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 * @param pCtx Where to get the current stack mode.
6444 * @param pTmpRsp The temporary stack pointer. This is updated.
6445 * @param cbItem The size of the stack item to push.
6446 */
6447DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6448{
6449 RTGCPTR GCPtrTop;
6450
6451 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6452 GCPtrTop = pTmpRsp->u -= cbItem;
6453 else if (pCtx->ss.Attr.n.u1DefBig)
6454 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6455 else
6456 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6457 return GCPtrTop;
6458}
6459
6460
6461/**
6462 * Gets the effective stack address for a pop of the specified size and
6463 * calculates and updates the temporary RSP.
6464 *
6465 * @returns Current stack pointer.
6466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6467 * @param pCtx Where to get the current stack mode.
6468 * @param pTmpRsp The temporary stack pointer. This is updated.
6469 * @param cbItem The size of the stack item to pop.
6470 */
6471DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6472{
6473 RTGCPTR GCPtrTop;
6474 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6475 {
6476 GCPtrTop = pTmpRsp->u;
6477 pTmpRsp->u += cbItem;
6478 }
6479 else if (pCtx->ss.Attr.n.u1DefBig)
6480 {
6481 GCPtrTop = pTmpRsp->DWords.dw0;
6482 pTmpRsp->DWords.dw0 += cbItem;
6483 }
6484 else
6485 {
6486 GCPtrTop = pTmpRsp->Words.w0;
6487 pTmpRsp->Words.w0 += cbItem;
6488 }
6489 return GCPtrTop;
6490}
6491
6492/** @} */
6493
6494
6495/** @name FPU access and helpers.
6496 *
6497 * @{
6498 */
6499
6500
6501/**
6502 * Hook for preparing to use the host FPU.
6503 *
6504 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6505 *
6506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6507 */
6508DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6509{
6510#ifdef IN_RING3
6511 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6512#else
6513 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6514#endif
6515}
6516
6517
6518/**
6519 * Hook for preparing to use the host FPU for SSE.
6520 *
6521 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6522 *
6523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6524 */
6525DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6526{
6527 iemFpuPrepareUsage(pVCpu);
6528}
6529
6530
6531/**
6532 * Hook for actualizing the guest FPU state before the interpreter reads it.
6533 *
6534 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6535 *
6536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6537 */
6538DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6539{
6540#ifdef IN_RING3
6541 NOREF(pVCpu);
6542#else
6543 CPUMRZFpuStateActualizeForRead(pVCpu);
6544#endif
6545}
6546
6547
6548/**
6549 * Hook for actualizing the guest FPU state before the interpreter changes it.
6550 *
6551 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6552 *
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 */
6555DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6556{
6557#ifdef IN_RING3
6558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6559#else
6560 CPUMRZFpuStateActualizeForChange(pVCpu);
6561#endif
6562}
6563
6564
6565/**
6566 * Hook for actualizing the guest XMM0..15 register state for read only.
6567 *
6568 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6569 *
6570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6571 */
6572DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6573{
6574#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6575 NOREF(pVCpu);
6576#else
6577 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6578#endif
6579}
6580
6581
6582/**
6583 * Hook for actualizing the guest XMM0..15 register state for read+write.
6584 *
6585 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6586 *
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 */
6589DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6590{
6591#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6592 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6593#else
6594 CPUMRZFpuStateActualizeForChange(pVCpu);
6595#endif
6596}
6597
6598
6599/**
6600 * Stores a QNaN value into a FPU register.
6601 *
6602 * @param pReg Pointer to the register.
6603 */
6604DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6605{
6606 pReg->au32[0] = UINT32_C(0x00000000);
6607 pReg->au32[1] = UINT32_C(0xc0000000);
6608 pReg->au16[4] = UINT16_C(0xffff);
6609}
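
/*
 * The three stores above produce the x87 "real indefinite" QNaN: sign=1,
 * exponent=0x7fff, mantissa=0xc000000000000000, i.e. the 80-bit image
 * ffff:c0000000:00000000 (sign/exponent word highest in memory).  A standalone
 * equivalent writing the 10-byte register image (assumes a little-endian host):
 *
 *      #include <stdint.h>
 *      #include <string.h>
 *      static void storeQNan(uint8_t abImage[10])
 *      {
 *          uint64_t const uMantissa = UINT64_C(0xc000000000000000);
 *          uint16_t const uSignExp  = UINT16_C(0xffff);
 *          memcpy(abImage,     &uMantissa, sizeof(uMantissa));   // bytes 0..7
 *          memcpy(abImage + 8, &uSignExp,  sizeof(uSignExp));    // bytes 8..9
 *      }
 */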
6610
6611
6612/**
6613 * Updates the FOP, FPU.CS and FPUIP registers.
6614 *
6615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6616 * @param pCtx The CPU context.
6617 * @param pFpuCtx The FPU context.
6618 */
6619DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6620{
6621 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6622 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6623 /** @todo x87.CS and FPUIP need to be kept separately. */
6624 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6625 {
6626 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
6627 * are handled in real mode, based on the fnsave and fnstenv images. */
6628 pFpuCtx->CS = 0;
6629 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6630 }
6631 else
6632 {
6633 pFpuCtx->CS = pCtx->cs.Sel;
6634 pFpuCtx->FPUIP = pCtx->rip;
6635 }
6636}
6637
6638
6639/**
6640 * Updates the x87.DS and FPUDP registers.
6641 *
6642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6643 * @param pCtx The CPU context.
6644 * @param pFpuCtx The FPU context.
6645 * @param iEffSeg The effective segment register.
6646 * @param GCPtrEff The effective address relative to @a iEffSeg.
6647 */
6648DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6649{
6650 RTSEL sel;
6651 switch (iEffSeg)
6652 {
6653 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6654 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6655 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6656 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6657 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6658 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6659 default:
6660 AssertMsgFailed(("%d\n", iEffSeg));
6661 sel = pCtx->ds.Sel;
6662 }
6663 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6664 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6665 {
6666 pFpuCtx->DS = 0;
6667 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6668 }
6669 else
6670 {
6671 pFpuCtx->DS = sel;
6672 pFpuCtx->FPUDP = GCPtrEff;
6673 }
6674}
6675
6676
6677/**
6678 * Rotates the stack registers in the push direction.
6679 *
6680 * @param pFpuCtx The FPU context.
6681 * @remarks This is a complete waste of time, but fxsave stores the registers in
6682 * stack order.
6683 */
6684DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6685{
6686 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6687 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6688 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6689 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6690 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6691 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6692 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6693 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6694 pFpuCtx->aRegs[0].r80 = r80Tmp;
6695}
6696
6697
6698/**
6699 * Rotates the stack registers in the pop direction.
6700 *
6701 * @param pFpuCtx The FPU context.
6702 * @remarks This is a complete waste of time, but fxsave stores the registers in
6703 * stack order.
6704 */
6705DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6706{
6707 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6708 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6709 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6710 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6711 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6712 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6713 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6714 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6715 pFpuCtx->aRegs[7].r80 = r80Tmp;
6716}
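
/*
 * The register array is kept in ST() order (aRegs[0] is ST(0)), so whenever TOP
 * moves the array has to be rotated to keep that invariant.  A sketch of the
 * push-direction rotation with doubles standing in for RTFLOAT80U:
 *
 *      static void rotateStackPush(double aSt[8])
 *      {
 *          double const tmp = aSt[7];          // old ST(7) wraps around to ST(0)
 *          for (int i = 7; i > 0; i--)
 *              aSt[i] = aSt[i - 1];            // old ST(i-1) becomes ST(i)
 *          aSt[0] = tmp;
 *      }
 */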
6717
6718
6719/**
6720 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6721 * exception prevents it.
6722 *
6723 * @param pResult The FPU operation result to push.
6724 * @param pFpuCtx The FPU context.
6725 */
6726IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6727{
6728 /* Update FSW and bail if there are pending exceptions afterwards. */
6729 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6730 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6731 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6732 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6733 {
6734 pFpuCtx->FSW = fFsw;
6735 return;
6736 }
6737
6738 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6739 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6740 {
6741 /* All is fine, push the actual value. */
6742 pFpuCtx->FTW |= RT_BIT(iNewTop);
6743 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6744 }
6745 else if (pFpuCtx->FCW & X86_FCW_IM)
6746 {
6747 /* Masked stack overflow, push QNaN. */
6748 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6749 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6750 }
6751 else
6752 {
6753 /* Raise stack overflow, don't push anything. */
6754 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6755 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6756 return;
6757 }
6758
6759 fFsw &= ~X86_FSW_TOP_MASK;
6760 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6761 pFpuCtx->FSW = fFsw;
6762
6763 iemFpuRotateStackPush(pFpuCtx);
6764}
6765
6766
6767/**
6768 * Stores a result in a FPU register and updates the FSW and FTW.
6769 *
6770 * @param pFpuCtx The FPU context.
6771 * @param pResult The result to store.
6772 * @param iStReg Which FPU register to store it in.
6773 */
6774IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6775{
6776 Assert(iStReg < 8);
6777 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6778 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6779 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6780 pFpuCtx->FTW |= RT_BIT(iReg);
6781 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6782}
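
/*
 * The ST(n) to physical register mapping used above, as a tiny standalone helper
 * (name invented for illustration): ST(iStReg) lives in physical register
 * (TOP + iStReg) & 7.  E.g. with TOP=6, ST(2) is physical register 0.
 *
 *      static unsigned stRegToPhysReg(unsigned uTop, unsigned iStReg)
 *      {
 *          return (uTop + iStReg) & 7;
 *      }
 */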
6783
6784
6785/**
6786 * Only updates the FPU status word (FSW) with the result of the current
6787 * instruction.
6788 *
6789 * @param pFpuCtx The FPU context.
6790 * @param u16FSW The FSW output of the current instruction.
6791 */
6792IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6793{
6794 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6795 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6796}
6797
6798
6799/**
6800 * Pops one item off the FPU stack if no pending exception prevents it.
6801 *
6802 * @param pFpuCtx The FPU context.
6803 */
6804IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6805{
6806 /* Check pending exceptions. */
6807 uint16_t uFSW = pFpuCtx->FSW;
6808 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6809 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6810 return;
6811
6812 /* TOP++ (popping increments TOP, as the x87 stack grows downwards). */
6813 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6814 uFSW &= ~X86_FSW_TOP_MASK;
6815 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6816 pFpuCtx->FSW = uFSW;
6817
6818 /* Mark the previous ST0 as empty. */
6819 iOldTop >>= X86_FSW_TOP_SHIFT;
6820 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6821
6822 /* Rotate the registers. */
6823 iemFpuRotateStackPop(pFpuCtx);
6824}
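
/*
 * FSW.TOP is a 3-bit field, so the push and pop adjustments above are done with
 * modular arithmetic: (TOP + 7) & 7 decrements it and (TOP + 9) & 7 == (TOP + 1) & 7
 * increments it.  A quick standalone check:
 *
 *      #include <assert.h>
 *      static void topArithmeticExample(void)
 *      {
 *          unsigned uTop = 0;
 *          uTop = (uTop + 7) & 7;      // push: 0 -> 7
 *          assert(uTop == 7);
 *          uTop = (uTop + 9) & 7;      // pop:  7 -> 0
 *          assert(uTop == 0);
 *      }
 */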
6825
6826
6827/**
6828 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6829 *
6830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6831 * @param pResult The FPU operation result to push.
6832 */
6833IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6834{
6835 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6836 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6837 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6838 iemFpuMaybePushResult(pResult, pFpuCtx);
6839}
6840
6841
6842/**
6843 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6844 * and sets FPUDP and FPUDS.
6845 *
6846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6847 * @param pResult The FPU operation result to push.
6848 * @param iEffSeg The effective segment register.
6849 * @param GCPtrEff The effective address relative to @a iEffSeg.
6850 */
6851IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6852{
6853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6855 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6856 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6857 iemFpuMaybePushResult(pResult, pFpuCtx);
6858}
6859
6860
6861/**
6862 * Replace ST0 with the first value and push the second onto the FPU stack,
6863 * unless a pending exception prevents it.
6864 *
6865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6866 * @param pResult The FPU operation result to store and push.
6867 */
6868IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6869{
6870 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6871 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6872 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6873
6874 /* Update FSW and bail if there are pending exceptions afterwards. */
6875 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6876 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6877 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6878 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6879 {
6880 pFpuCtx->FSW = fFsw;
6881 return;
6882 }
6883
6884 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6885 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6886 {
6887 /* All is fine, push the actual value. */
6888 pFpuCtx->FTW |= RT_BIT(iNewTop);
6889 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6890 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6891 }
6892 else if (pFpuCtx->FCW & X86_FCW_IM)
6893 {
6894 /* Masked stack overflow, push QNaN. */
6895 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6896 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6897 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6898 }
6899 else
6900 {
6901 /* Raise stack overflow, don't push anything. */
6902 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6903 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6904 return;
6905 }
6906
6907 fFsw &= ~X86_FSW_TOP_MASK;
6908 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6909 pFpuCtx->FSW = fFsw;
6910
6911 iemFpuRotateStackPush(pFpuCtx);
6912}
6913
6914
6915/**
6916 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6917 * FOP.
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 * @param pResult The result to store.
6921 * @param iStReg Which FPU register to store it in.
6922 */
6923IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6924{
6925 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6926 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6927 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6928 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6929}
6930
6931
6932/**
6933 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6934 * FOP, and then pops the stack.
6935 *
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 * @param pResult The result to store.
6938 * @param iStReg Which FPU register to store it in.
6939 */
6940IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6941{
6942 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6943 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6944 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6945 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6946 iemFpuMaybePopOne(pFpuCtx);
6947}
6948
6949
6950/**
6951 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6952 * FPUDP, and FPUDS.
6953 *
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 * @param pResult The result to store.
6956 * @param iStReg Which FPU register to store it in.
6957 * @param iEffSeg The effective memory operand selector register.
6958 * @param GCPtrEff The effective memory operand offset.
6959 */
6960IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6961 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6962{
6963 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6964 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6965 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6966 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6967 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6968}
6969
6970
6971/**
6972 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6973 * FPUDP, and FPUDS, and then pops the stack.
6974 *
6975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6976 * @param pResult The result to store.
6977 * @param iStReg Which FPU register to store it in.
6978 * @param iEffSeg The effective memory operand selector register.
6979 * @param GCPtrEff The effective memory operand offset.
6980 */
6981IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6982 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6983{
6984 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6985 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6986 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6987 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6988 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6989 iemFpuMaybePopOne(pFpuCtx);
6990}
6991
6992
6993/**
6994 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6995 *
6996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6997 */
6998IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6999{
7000 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7001 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7002 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7003}
7004
7005
7006/**
7007 * Marks the specified stack register as free (for FFREE).
7008 *
7009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7010 * @param iStReg The register to free.
7011 */
7012IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7013{
7014 Assert(iStReg < 8);
7015 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7016 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7017 pFpuCtx->FTW &= ~RT_BIT(iReg);
7018}
7019
7020
7021/**
7022 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7023 *
7024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7025 */
7026IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7027{
7028 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7029 uint16_t uFsw = pFpuCtx->FSW;
7030 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7031 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7032 uFsw &= ~X86_FSW_TOP_MASK;
7033 uFsw |= uTop;
7034 pFpuCtx->FSW = uFsw;
7035}
7036
7037
7038/**
7039 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7040 *
7041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7042 */
7043IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7044{
7045 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7046 uint16_t uFsw = pFpuCtx->FSW;
7047 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7048 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7049 uFsw &= ~X86_FSW_TOP_MASK;
7050 uFsw |= uTop;
7051 pFpuCtx->FSW = uFsw;
7052}
7053
7054
7055/**
7056 * Updates the FSW, FOP, FPUIP, and FPUCS.
7057 *
7058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7059 * @param u16FSW The FSW from the current instruction.
7060 */
7061IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7062{
7063 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7064 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7065 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7066 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7067}
7068
7069
7070/**
7071 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7072 *
7073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7074 * @param u16FSW The FSW from the current instruction.
7075 */
7076IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7077{
7078 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7079 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7080 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7081 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7082 iemFpuMaybePopOne(pFpuCtx);
7083}
7084
7085
7086/**
7087 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7088 *
7089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7090 * @param u16FSW The FSW from the current instruction.
7091 * @param iEffSeg The effective memory operand selector register.
7092 * @param GCPtrEff The effective memory operand offset.
7093 */
7094IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7095{
7096 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7097 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7098 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7099 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7100 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7101}
7102
7103
7104/**
7105 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7106 *
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 * @param u16FSW The FSW from the current instruction.
7109 */
7110IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7111{
7112 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7113 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7114 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7115 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7116 iemFpuMaybePopOne(pFpuCtx);
7117 iemFpuMaybePopOne(pFpuCtx);
7118}
7119
7120
7121/**
7122 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7123 *
7124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7125 * @param u16FSW The FSW from the current instruction.
7126 * @param iEffSeg The effective memory operand selector register.
7127 * @param GCPtrEff The effective memory operand offset.
7128 */
7129IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7130{
7131 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7132 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7133 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7134 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7135 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7136 iemFpuMaybePopOne(pFpuCtx);
7137}
7138
7139
7140/**
7141 * Worker routine for raising an FPU stack underflow exception.
7142 *
7143 * @param pFpuCtx The FPU context.
7144 * @param iStReg The stack register being accessed.
7145 */
7146IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7147{
7148 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7149 if (pFpuCtx->FCW & X86_FCW_IM)
7150 {
7151 /* Masked underflow. */
7152 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7153 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7154 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7155 if (iStReg != UINT8_MAX)
7156 {
7157 pFpuCtx->FTW |= RT_BIT(iReg);
7158 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7159 }
7160 }
7161 else
7162 {
7163 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7164 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7165 }
7166}
7167
7168
7169/**
7170 * Raises a FPU stack underflow exception.
7171 *
7172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7173 * @param iStReg The destination register that should be loaded
7174 * with QNaN if \#IS is not masked. Specify
7175 * UINT8_MAX if none (like for fcom).
7176 */
7177DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7178{
7179 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7180 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7181 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7182 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7183}
7184
7185
7186DECL_NO_INLINE(IEM_STATIC, void)
7187iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7188{
7189 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7190 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7191 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7192 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7193 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7194}
7195
7196
7197DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7198{
7199 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7200 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7201 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7202 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7203 iemFpuMaybePopOne(pFpuCtx);
7204}
7205
7206
7207DECL_NO_INLINE(IEM_STATIC, void)
7208iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7209{
7210 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7211 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7212 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7213 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7214 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7215 iemFpuMaybePopOne(pFpuCtx);
7216}
7217
7218
7219DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7220{
7221 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7222 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7223 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7224 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7225 iemFpuMaybePopOne(pFpuCtx);
7226 iemFpuMaybePopOne(pFpuCtx);
7227}
7228
7229
7230DECL_NO_INLINE(IEM_STATIC, void)
7231iemFpuStackPushUnderflow(PVMCPU pVCpu)
7232{
7233 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7234 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7235 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7236
7237 if (pFpuCtx->FCW & X86_FCW_IM)
7238 {
7239 /* Masked overflow - Push QNaN. */
7240 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7241 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7242 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7243 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7244 pFpuCtx->FTW |= RT_BIT(iNewTop);
7245 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7246 iemFpuRotateStackPush(pFpuCtx);
7247 }
7248 else
7249 {
7250 /* Exception pending - don't change TOP or the register stack. */
7251 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7252 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7253 }
7254}
7255
7256
7257DECL_NO_INLINE(IEM_STATIC, void)
7258iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7259{
7260 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7261 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7262 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7263
7264 if (pFpuCtx->FCW & X86_FCW_IM)
7265 {
7266 /* Masked overflow - Push QNaN. */
7267 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7268 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7269 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7270 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7271 pFpuCtx->FTW |= RT_BIT(iNewTop);
7272 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7273 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7274 iemFpuRotateStackPush(pFpuCtx);
7275 }
7276 else
7277 {
7278 /* Exception pending - don't change TOP or the register stack. */
7279 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7280 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7281 }
7282}
7283
7284
7285/**
7286 * Worker routine for raising an FPU stack overflow exception on a push.
7287 *
7288 * @param pFpuCtx The FPU context.
7289 */
7290IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7291{
7292 if (pFpuCtx->FCW & X86_FCW_IM)
7293 {
7294 /* Masked overflow. */
7295 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7296 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7297 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7298 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7299 pFpuCtx->FTW |= RT_BIT(iNewTop);
7300 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7301 iemFpuRotateStackPush(pFpuCtx);
7302 }
7303 else
7304 {
7305 /* Exception pending - don't change TOP or the register stack. */
7306 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7307 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7308 }
7309}
7310
7311
7312/**
7313 * Raises a FPU stack overflow exception on a push.
7314 *
7315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7316 */
7317DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7318{
7319 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7320 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7321 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7322 iemFpuStackPushOverflowOnly(pFpuCtx);
7323}
7324
7325
7326/**
7327 * Raises a FPU stack overflow exception on a push with a memory operand.
7328 *
7329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7330 * @param iEffSeg The effective memory operand selector register.
7331 * @param GCPtrEff The effective memory operand offset.
7332 */
7333DECL_NO_INLINE(IEM_STATIC, void)
7334iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7335{
7336 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7337 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7338 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7339 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7340 iemFpuStackPushOverflowOnly(pFpuCtx);
7341}
7342
7343
7344IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7345{
7346 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7347 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7348 if (pFpuCtx->FTW & RT_BIT(iReg))
7349 return VINF_SUCCESS;
7350 return VERR_NOT_FOUND;
7351}
7352
7353
7354IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7355{
7356 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7357 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7358 if (pFpuCtx->FTW & RT_BIT(iReg))
7359 {
7360 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7361 return VINF_SUCCESS;
7362 }
7363 return VERR_NOT_FOUND;
7364}
7365
7366
7367IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7368 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7369{
7370 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7371 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7372 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7373 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7374 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7375 {
7376 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7377 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7378 return VINF_SUCCESS;
7379 }
7380 return VERR_NOT_FOUND;
7381}
7382
7383
7384IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7385{
7386 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7387 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7388 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7389 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7390 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7391 {
7392 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7393 return VINF_SUCCESS;
7394 }
7395 return VERR_NOT_FOUND;
7396}
7397
7398
7399/**
7400 * Updates the FPU exception status after FCW is changed.
7401 *
7402 * @param pFpuCtx The FPU context.
7403 */
7404IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7405{
7406 uint16_t u16Fsw = pFpuCtx->FSW;
7407 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7408 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7409 else
7410 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7411 pFpuCtx->FSW = u16Fsw;
7412}
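
/*
 * A simplified standalone sketch of the ES/B recomputation above, using the
 * architectural bit positions directly (the real code uses the X86_FSW_XCPT_MASK
 * and X86_FCW_XCPT_MASK macros, which may also cover the stack-fault bit):
 *
 *      #include <stdint.h>
 *      static uint16_t recalcEsAndB(uint16_t uFsw, uint16_t uFcw)
 *      {
 *          if ((uFsw & 0x3f) & ~(uFcw & 0x3f))     // any unmasked IE/DE/ZE/OE/UE/PE pending?
 *              return (uint16_t)(uFsw | 0x8080);   // set ES (bit 7) and B (bit 15)
 *          return (uint16_t)(uFsw & ~0x8080);      // otherwise clear both
 *      }
 */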
7413
7414
7415/**
7416 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7417 *
7418 * @returns The full FTW.
7419 * @param pFpuCtx The FPU context.
7420 */
7421IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7422{
7423 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7424 uint16_t u16Ftw = 0;
7425 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7426 for (unsigned iSt = 0; iSt < 8; iSt++)
7427 {
7428 unsigned const iReg = (iSt + iTop) & 7;
7429 if (!(u8Ftw & RT_BIT(iReg)))
7430 u16Ftw |= 3 << (iReg * 2); /* empty */
7431 else
7432 {
7433 uint16_t uTag;
7434 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7435 if (pr80Reg->s.uExponent == 0x7fff)
7436 uTag = 2; /* Exponent is all 1's => Special. */
7437 else if (pr80Reg->s.uExponent == 0x0000)
7438 {
7439 if (pr80Reg->s.u64Mantissa == 0x0000)
7440 uTag = 1; /* All bits are zero => Zero. */
7441 else
7442 uTag = 2; /* Must be special. */
7443 }
7444 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7445 uTag = 0; /* Valid. */
7446 else
7447 uTag = 2; /* Must be special. */
7448
7449 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7450 }
7451 }
7452
7453 return u16Ftw;
7454}
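
/*
 * The tag classification above, condensed into a standalone helper (illustrative
 * only): 0 = valid, 1 = zero, 2 = special (NaN/infinity/denormal/unnormal),
 * 3 = empty.
 *
 *      #include <stdint.h>
 *      static unsigned classifyTag(uint16_t uExponent, uint64_t uMantissa)
 *      {
 *          if (uExponent == 0x7fff)
 *              return 2;                                   // exponent all ones => special
 *          if (uExponent == 0)
 *              return uMantissa == 0 ? 1 : 2;              // zero, or denormal => special
 *          return (uMantissa >> 63) ? 0 : 2;               // J bit set => valid, else special
 *      }
 */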
7455
7456
7457/**
7458 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7459 *
7460 * @returns The compressed FTW.
7461 * @param u16FullFtw The full FTW to convert.
7462 */
7463IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7464{
7465 uint8_t u8Ftw = 0;
7466 for (unsigned i = 0; i < 8; i++)
7467 {
7468 if ((u16FullFtw & 3) != 3 /*empty*/)
7469 u8Ftw |= RT_BIT(i);
7470 u16FullFtw >>= 2;
7471 }
7472
7473 return u8Ftw;
7474}
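
/*
 * Worked example (illustrative): each 2-bit tag collapses to a single
 * occupied/empty bit, so a full FTW of 0xfffe (register 0 tagged special, the
 * rest empty) compresses to 0x01; iemFpuCalcFullFtw above goes the other way.
 *
 *      #include <stdint.h>
 *      static uint8_t compressFtw(uint16_t uFullFtw)
 *      {
 *          uint8_t u8Ftw = 0;
 *          for (unsigned i = 0; i < 8; i++, uFullFtw >>= 2)
 *              if ((uFullFtw & 3) != 3)                    // anything but "empty"
 *                  u8Ftw |= (uint8_t)(1 << i);
 *          return u8Ftw;
 *      }
 */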
7475
7476/** @} */
7477
7478
7479/** @name Memory access.
7480 *
7481 * @{
7482 */
7483
7484
7485/**
7486 * Updates the IEMCPU::cbWritten counter if applicable.
7487 *
7488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7489 * @param fAccess The access being accounted for.
7490 * @param cbMem The access size.
7491 */
7492DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7493{
7494 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7495 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7496 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7497}
7498
7499
7500/**
7501 * Checks if the given segment can be written to, raising the appropriate
7502 * exception if not.
7503 *
7504 * @returns VBox strict status code.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 * @param pHid Pointer to the hidden register.
7508 * @param iSegReg The register number.
7509 * @param pu64BaseAddr Where to return the base address to use for the
7510 * segment. (In 64-bit code it may differ from the
7511 * base in the hidden segment.)
7512 */
7513IEM_STATIC VBOXSTRICTRC
7514iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7515{
7516 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7517 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7518 else
7519 {
7520 if (!pHid->Attr.n.u1Present)
7521 {
7522 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7523 AssertRelease(uSel == 0);
7524 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7525 return iemRaiseGeneralProtectionFault0(pVCpu);
7526 }
7527
7528 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7529 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7530 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7531 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7532 *pu64BaseAddr = pHid->u64Base;
7533 }
7534 return VINF_SUCCESS;
7535}
7536
7537
7538/**
7539 * Checks if the given segment can be read from, raising the appropriate
7540 * exception if not.
7541 *
7542 * @returns VBox strict status code.
7543 *
7544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7545 * @param pHid Pointer to the hidden register.
7546 * @param iSegReg The register number.
7547 * @param pu64BaseAddr Where to return the base address to use for the
7548 * segment. (In 64-bit code it may differ from the
7549 * base in the hidden segment.)
7550 */
7551IEM_STATIC VBOXSTRICTRC
7552iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7553{
7554 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7555 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7556 else
7557 {
7558 if (!pHid->Attr.n.u1Present)
7559 {
7560 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7561 AssertRelease(uSel == 0);
7562 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7563 return iemRaiseGeneralProtectionFault0(pVCpu);
7564 }
7565
7566 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7567 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7568 *pu64BaseAddr = pHid->u64Base;
7569 }
7570 return VINF_SUCCESS;
7571}
7572
7573
7574/**
7575 * Applies the segment limit, base and attributes.
7576 *
7577 * This may raise a \#GP or \#SS.
7578 *
7579 * @returns VBox strict status code.
7580 *
7581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7582 * @param fAccess The kind of access which is being performed.
7583 * @param iSegReg The index of the segment register to apply.
7584 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7585 * TSS, ++).
7586 * @param cbMem The access size.
7587 * @param pGCPtrMem Pointer to the guest memory address to apply
7588 * segmentation to. Input and output parameter.
7589 */
7590IEM_STATIC VBOXSTRICTRC
7591iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7592{
7593 if (iSegReg == UINT8_MAX)
7594 return VINF_SUCCESS;
7595
7596 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7597 switch (pVCpu->iem.s.enmCpuMode)
7598 {
7599 case IEMMODE_16BIT:
7600 case IEMMODE_32BIT:
7601 {
7602 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7603 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7604
7605 if ( pSel->Attr.n.u1Present
7606 && !pSel->Attr.n.u1Unusable)
7607 {
7608 Assert(pSel->Attr.n.u1DescType);
7609 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7610 {
7611 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7612 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7613 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7614
7615 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7616 {
7617 /** @todo CPL check. */
7618 }
7619
7620 /*
7621 * There are two kinds of data selectors, normal and expand down.
7622 */
7623 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7624 {
7625 if ( GCPtrFirst32 > pSel->u32Limit
7626 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7627 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7628 }
7629 else
7630 {
7631 /*
7632 * The upper boundary is defined by the B bit, not the G bit!
7633 */
7634 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7635 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7636 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7637 }
7638 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7639 }
7640 else
7641 {
7642
7643 /*
7644 * A code selector can usually be used to read through; writing is
7645 * only permitted in real and V8086 mode.
7646 */
7647 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7648 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7649 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7650 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7651 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7652
7653 if ( GCPtrFirst32 > pSel->u32Limit
7654 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7655 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7656
7657 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7658 {
7659 /** @todo CPL check. */
7660 }
7661
7662 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7663 }
7664 }
7665 else
7666 return iemRaiseGeneralProtectionFault0(pVCpu);
7667 return VINF_SUCCESS;
7668 }
7669
7670 case IEMMODE_64BIT:
7671 {
7672 RTGCPTR GCPtrMem = *pGCPtrMem;
7673 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7674 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7675
7676 Assert(cbMem >= 1);
7677 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7678 return VINF_SUCCESS;
7679 return iemRaiseGeneralProtectionFault0(pVCpu);
7680 }
7681
7682 default:
7683 AssertFailedReturn(VERR_IEM_IPE_7);
7684 }
7685}
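
/*
 * Worked example of the expand-down limit check above (helper and values are
 * illustrative): the valid offsets are (limit, upper], where the upper bound is
 * 0xffffffff for a big (B=1) segment and 0xffff otherwise.
 *
 *      #include <stdint.h>
 *      static int expandDownAccessOk(uint32_t uFirst, uint32_t uLast, uint32_t uLimit, int fBig)
 *      {
 *          uint32_t const uUpper = fBig ? UINT32_C(0xffffffff) : UINT32_C(0xffff);
 *          return uFirst > uLimit && uLast <= uUpper;      // otherwise #GP/#SS
 *      }
 *      // expandDownAccessOk(0x1000, 0x1003, 0x0fff, 1) == 1; touching 0x0fff itself faults.
 */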
7686
7687
7688/**
7689 * Translates a virtual address to a physical address and checks if we
7690 * can access the page as specified.
7691 *
7692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7693 * @param GCPtrMem The virtual address.
7694 * @param fAccess The intended access.
7695 * @param pGCPhysMem Where to return the physical address.
7696 */
7697IEM_STATIC VBOXSTRICTRC
7698iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7699{
7700 /** @todo Need a different PGM interface here. We're currently using
7701 * generic / REM interfaces. This won't cut it for R0 & RC. */
7702 RTGCPHYS GCPhys;
7703 uint64_t fFlags;
7704 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7705 if (RT_FAILURE(rc))
7706 {
7707 /** @todo Check unassigned memory in unpaged mode. */
7708 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7709 *pGCPhysMem = NIL_RTGCPHYS;
7710 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7711 }
7712
7713 /* If the page is writable and does not have the no-exec bit set, all
7714 access is allowed. Otherwise we'll have to check more carefully... */
7715 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7716 {
7717 /* Write to read only memory? */
7718 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7719 && !(fFlags & X86_PTE_RW)
7720 && ( (pVCpu->iem.s.uCpl == 3
7721 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7722 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7723 {
7724 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7725 *pGCPhysMem = NIL_RTGCPHYS;
7726 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7727 }
7728
7729 /* Kernel memory accessed by userland? */
7730 if ( !(fFlags & X86_PTE_US)
7731 && pVCpu->iem.s.uCpl == 3
7732 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7733 {
7734 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7735 *pGCPhysMem = NIL_RTGCPHYS;
7736 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7737 }
7738
7739 /* Executing non-executable memory? */
7740 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7741 && (fFlags & X86_PTE_PAE_NX)
7742 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7743 {
7744 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7745 *pGCPhysMem = NIL_RTGCPHYS;
7746 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7747 VERR_ACCESS_DENIED);
7748 }
7749 }
7750
7751 /*
7752 * Set the dirty / access flags.
7753 * ASSUMES this is set when the address is translated rather than on commit...
7754 */
7755 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7756 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7757 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7758 {
7759 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7760 AssertRC(rc2);
7761 }
7762
7763 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7764 *pGCPhysMem = GCPhys;
7765 return VINF_SUCCESS;
7766}
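
/*
 * A reduced sketch of the tail end of the translation above, assuming 4K pages
 * and the architectural PTE bit positions (A = bit 5, D = bit 6); names are
 * illustrative:
 *
 *      #include <stdint.h>
 *      static uint64_t finishTranslation(uint64_t uPageFrame, uint64_t uVirt, int fWrite,
 *                                        uint32_t *pfAccessedDirty)
 *      {
 *          *pfAccessedDirty = fWrite ? (UINT32_C(1) << 5) | (UINT32_C(1) << 6)   // A + D for writes
 *                                    : (UINT32_C(1) << 5);                       // A only for reads
 *          return uPageFrame | (uVirt & UINT64_C(0xfff));  // keep the offset within the page
 *      }
 */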
7767
7768
7769
7770/**
7771 * Maps a physical page.
7772 *
7773 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7775 * @param GCPhysMem The physical address.
7776 * @param fAccess The intended access.
7777 * @param ppvMem Where to return the mapping address.
7778 * @param pLock The PGM lock.
7779 */
7780IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7781{
7782#ifdef IEM_VERIFICATION_MODE_FULL
7783 /* Force the alternative path so we can ignore writes. */
7784 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7785 {
7786 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7787 {
7788 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7789 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7790 if (RT_FAILURE(rc2))
7791 pVCpu->iem.s.fProblematicMemory = true;
7792 }
7793 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7794 }
7795#endif
7796#ifdef IEM_LOG_MEMORY_WRITES
7797 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7798 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7799#endif
7800#ifdef IEM_VERIFICATION_MODE_MINIMAL
7801 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7802#endif
7803
7804 /** @todo This API may require some improving later. A private deal with PGM
7805 * regarding locking and unlocking needs to be struck. A couple of TLBs
7806 * living in PGM, but with publicly accessible inlined access methods
7807 * could perhaps be an even better solution. */
7808 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7809 GCPhysMem,
7810 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7811 pVCpu->iem.s.fBypassHandlers,
7812 ppvMem,
7813 pLock);
7814 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7815 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7816
7817#ifdef IEM_VERIFICATION_MODE_FULL
7818 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7819 pVCpu->iem.s.fProblematicMemory = true;
7820#endif
7821 return rc;
7822}
7823
7824
7825/**
7826 * Unmap a page previously mapped by iemMemPageMap.
7827 *
7828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7829 * @param GCPhysMem The physical address.
7830 * @param fAccess The intended access.
7831 * @param pvMem What iemMemPageMap returned.
7832 * @param pLock The PGM lock.
7833 */
7834DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7835{
7836 NOREF(pVCpu);
7837 NOREF(GCPhysMem);
7838 NOREF(fAccess);
7839 NOREF(pvMem);
7840 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7841}
7842
7843
7844/**
7845 * Looks up a memory mapping entry.
7846 *
7847 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7849 * @param pvMem The memory address.
7850 * @param fAccess The access type and origin flags to match.
7851 */
7852DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7853{
7854 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7855 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7856 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7857 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7858 return 0;
7859 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7860 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7861 return 1;
7862 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7863 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7864 return 2;
7865 return VERR_NOT_FOUND;
7866}
7867
7868
7869/**
7870 * Finds a free memmap entry when using iNextMapping doesn't work.
7871 *
7872 * @returns Memory mapping index, 1024 on failure.
7873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7874 */
7875IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7876{
7877 /*
7878 * The easy case.
7879 */
7880 if (pVCpu->iem.s.cActiveMappings == 0)
7881 {
7882 pVCpu->iem.s.iNextMapping = 1;
7883 return 0;
7884 }
7885
7886 /* There should be enough mappings for all instructions. */
7887 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7888
7889 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7890 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7891 return i;
7892
7893 AssertFailedReturn(1024);
7894}
7895
7896
7897/**
7898 * Commits a bounce buffer that needs writing back and unmaps it.
7899 *
7900 * @returns Strict VBox status code.
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param iMemMap The index of the buffer to commit.
7903 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7904 * Always false in ring-3, obviously.
7905 */
7906IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7907{
7908 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7909 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7910#ifdef IN_RING3
7911 Assert(!fPostponeFail);
7912 RT_NOREF_PV(fPostponeFail);
7913#endif
7914
7915 /*
7916 * Do the writing.
7917 */
7918#ifndef IEM_VERIFICATION_MODE_MINIMAL
7919 PVM pVM = pVCpu->CTX_SUFF(pVM);
7920 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7921 && !IEM_VERIFICATION_ENABLED(pVCpu))
7922 {
7923 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7924 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7925 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7926 if (!pVCpu->iem.s.fBypassHandlers)
7927 {
7928 /*
7929 * Carefully and efficiently dealing with access handler return
7930 * codes makes this a little bloated.
7931 */
7932 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7933 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7934 pbBuf,
7935 cbFirst,
7936 PGMACCESSORIGIN_IEM);
7937 if (rcStrict == VINF_SUCCESS)
7938 {
7939 if (cbSecond)
7940 {
7941 rcStrict = PGMPhysWrite(pVM,
7942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7943 pbBuf + cbFirst,
7944 cbSecond,
7945 PGMACCESSORIGIN_IEM);
7946 if (rcStrict == VINF_SUCCESS)
7947 { /* nothing */ }
7948 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7949 {
7950 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7951 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7952 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7953 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7954 }
7955# ifndef IN_RING3
7956 else if (fPostponeFail)
7957 {
7958 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7959 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7960 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7961 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7962 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7963 return iemSetPassUpStatus(pVCpu, rcStrict);
7964 }
7965# endif
7966 else
7967 {
7968 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7969 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7970 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7971 return rcStrict;
7972 }
7973 }
7974 }
7975 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7976 {
7977 if (!cbSecond)
7978 {
7979 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7980 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7981 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7982 }
7983 else
7984 {
7985 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7986 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7987 pbBuf + cbFirst,
7988 cbSecond,
7989 PGMACCESSORIGIN_IEM);
7990 if (rcStrict2 == VINF_SUCCESS)
7991 {
7992 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7993 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7994 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7995 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7996 }
7997 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7998 {
7999 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8000 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8001 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8002 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8003 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8004 }
8005# ifndef IN_RING3
8006 else if (fPostponeFail)
8007 {
8008 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8009 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8010 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8011 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8012 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8013 return iemSetPassUpStatus(pVCpu, rcStrict);
8014 }
8015# endif
8016 else
8017 {
8018 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8019 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8020 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8021 return rcStrict2;
8022 }
8023 }
8024 }
8025# ifndef IN_RING3
8026 else if (fPostponeFail)
8027 {
8028 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8029 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8030 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8031 if (!cbSecond)
8032 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8033 else
8034 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8035 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8036 return iemSetPassUpStatus(pVCpu, rcStrict);
8037 }
8038# endif
8039 else
8040 {
8041 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8042 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8043 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8044 return rcStrict;
8045 }
8046 }
8047 else
8048 {
8049 /*
8050 * No access handlers, much simpler.
8051 */
8052 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8053 if (RT_SUCCESS(rc))
8054 {
8055 if (cbSecond)
8056 {
8057 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8058 if (RT_SUCCESS(rc))
8059 { /* likely */ }
8060 else
8061 {
8062 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8063 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8064 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8065 return rc;
8066 }
8067 }
8068 }
8069 else
8070 {
8071 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8073 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8074 return rc;
8075 }
8076 }
8077 }
8078#endif
8079
8080#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8081 /*
8082 * Record the write(s).
8083 */
8084 if (!pVCpu->iem.s.fNoRem)
8085 {
8086 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8087 if (pEvtRec)
8088 {
8089 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8090 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8091 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8092 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8093 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8094 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8095 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8096 }
8097 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8098 {
8099 pEvtRec = iemVerifyAllocRecord(pVCpu);
8100 if (pEvtRec)
8101 {
8102 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8103 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8104 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8105 memcpy(pEvtRec->u.RamWrite.ab,
8106 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8107 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8108 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8109 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8110 }
8111 }
8112 }
8113#endif
8114#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8115 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8116 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8117 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8118 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8119 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8120 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8121
8122 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8123 g_cbIemWrote = cbWrote;
8124 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8125#endif
8126
8127 /*
8128 * Free the mapping entry.
8129 */
8130 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8131 Assert(pVCpu->iem.s.cActiveMappings != 0);
8132 pVCpu->iem.s.cActiveMappings--;
8133 return VINF_SUCCESS;
8134}
8135
8136
8137/**
8138 * iemMemMap worker that deals with a request crossing pages.
8139 */
8140IEM_STATIC VBOXSTRICTRC
8141iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8142{
8143 /*
8144 * Do the address translations.
8145 */
8146 RTGCPHYS GCPhysFirst;
8147 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8148 if (rcStrict != VINF_SUCCESS)
8149 return rcStrict;
8150
8151 RTGCPHYS GCPhysSecond;
8152 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8153 fAccess, &GCPhysSecond);
8154 if (rcStrict != VINF_SUCCESS)
8155 return rcStrict;
8156 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8157
8158 PVM pVM = pVCpu->CTX_SUFF(pVM);
8159#ifdef IEM_VERIFICATION_MODE_FULL
8160 /*
8161 * Detect problematic memory when verifying so we can select
8162 * the right execution engine. (TLB: Redo this.)
8163 */
8164 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8165 {
8166 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8167 if (RT_SUCCESS(rc2))
8168 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8169 if (RT_FAILURE(rc2))
8170 pVCpu->iem.s.fProblematicMemory = true;
8171 }
8172#endif
8173
8174
8175 /*
8176 * Read in the current memory content if it's a read, execute or partial
8177 * write access.
8178 */
8179 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8180 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8181 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8182
8183 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8184 {
8185 if (!pVCpu->iem.s.fBypassHandlers)
8186 {
8187 /*
8188 * Must carefully deal with access handler status codes here,
8189 * which makes the code a bit bloated.
8190 */
8191 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8192 if (rcStrict == VINF_SUCCESS)
8193 {
8194 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8195 if (rcStrict == VINF_SUCCESS)
8196 { /*likely */ }
8197 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8198 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8199 else
8200 {
8201 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8202 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8203 return rcStrict;
8204 }
8205 }
8206 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8207 {
8208 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8209 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8210 {
8211 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8212 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8213 }
8214 else
8215 {
8216 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8217 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8218 return rcStrict2;
8219 }
8220 }
8221 else
8222 {
8223 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8224 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8225 return rcStrict;
8226 }
8227 }
8228 else
8229 {
8230 /*
8231 * No informational status codes here, much more straightforward.
8232 */
8233 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8234 if (RT_SUCCESS(rc))
8235 {
8236 Assert(rc == VINF_SUCCESS);
8237 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8238 if (RT_SUCCESS(rc))
8239 Assert(rc == VINF_SUCCESS);
8240 else
8241 {
8242 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8243 return rc;
8244 }
8245 }
8246 else
8247 {
8248 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8249 return rc;
8250 }
8251 }
8252
8253#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8254 if ( !pVCpu->iem.s.fNoRem
8255 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8256 {
8257 /*
8258 * Record the reads.
8259 */
8260 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8261 if (pEvtRec)
8262 {
8263 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8264 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8265 pEvtRec->u.RamRead.cb = cbFirstPage;
8266 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8267 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8268 }
8269 pEvtRec = iemVerifyAllocRecord(pVCpu);
8270 if (pEvtRec)
8271 {
8272 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8273 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8274 pEvtRec->u.RamRead.cb = cbSecondPage;
8275 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8276 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8277 }
8278 }
8279#endif
8280 }
8281#ifdef VBOX_STRICT
8282 else
8283 memset(pbBuf, 0xcc, cbMem);
8284 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8285 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8286#endif
8287
8288 /*
8289 * Commit the bounce buffer entry.
8290 */
8291 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8292 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8293 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8294 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8295 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8296 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8297 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8298 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8299 pVCpu->iem.s.cActiveMappings++;
8300
8301 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8302 *ppvMem = pbBuf;
8303 return VINF_SUCCESS;
8304}
8305
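/*
 * Worked example of the split above (illustrative numbers only): with the x86
 * PAGE_SIZE of 4096 (0x1000) bytes, a 4 byte access whose first byte sits at
 * page offset 0xffe gives
 *     cbFirstPage  = PAGE_SIZE - 0xffe    = 2  (last two bytes of page one)
 *     cbSecondPage = cbMem - cbFirstPage  = 2  (first two bytes of page two)
 * and both halves are read into / written out of aBounceBuffers[iMemMap] back
 * to back, so the caller sees a single contiguous buffer.
 */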
8306
8307/**
8308 * iemMemMap worker that deals with iemMemPageMap failures.
8309 */
8310IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8311 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8312{
8313 /*
8314 * Filter out conditions we can handle and the ones which shouldn't happen.
8315 */
8316 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8317 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8318 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8319 {
8320 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8321 return rcMap;
8322 }
8323 pVCpu->iem.s.cPotentialExits++;
8324
8325 /*
8326 * Read in the current memory content if it's a read, execute or partial
8327 * write access.
8328 */
8329 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8330 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8331 {
8332 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8333 memset(pbBuf, 0xff, cbMem);
8334 else
8335 {
8336 int rc;
8337 if (!pVCpu->iem.s.fBypassHandlers)
8338 {
8339 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8340 if (rcStrict == VINF_SUCCESS)
8341 { /* nothing */ }
8342 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8343 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8344 else
8345 {
8346 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8347 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8348 return rcStrict;
8349 }
8350 }
8351 else
8352 {
8353 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8354 if (RT_SUCCESS(rc))
8355 { /* likely */ }
8356 else
8357 {
8358 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8359 GCPhysFirst, rc));
8360 return rc;
8361 }
8362 }
8363 }
8364
8365#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8366 if ( !pVCpu->iem.s.fNoRem
8367 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8368 {
8369 /*
8370 * Record the read.
8371 */
8372 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8373 if (pEvtRec)
8374 {
8375 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8376 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8377 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8378 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8379 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8380 }
8381 }
8382#endif
8383 }
8384#ifdef VBOX_STRICT
8385 else
8386 memset(pbBuf, 0xcc, cbMem);
8387#endif
8388#ifdef VBOX_STRICT
8389 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8390 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8391#endif
8392
8393 /*
8394 * Commit the bounce buffer entry.
8395 */
8396 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8397 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8398 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8401 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8402 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8403 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8404 pVCpu->iem.s.cActiveMappings++;
8405
8406 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8407 *ppvMem = pbBuf;
8408 return VINF_SUCCESS;
8409}
8410
8411
8412
8413/**
8414 * Maps the specified guest memory for the given kind of access.
8415 *
8416 * This may be using bounce buffering of the memory if it's crossing a page
8417 * boundary or if there is an access handler installed for any of it. Because
8418 * of lock prefix guarantees, we're in for some extra clutter when this
8419 * happens.
8420 *
8421 * This may raise a \#GP, \#SS, \#PF or \#AC.
8422 *
8423 * @returns VBox strict status code.
8424 *
8425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8426 * @param ppvMem Where to return the pointer to the mapped
8427 * memory.
8428 * @param cbMem The number of bytes to map. This is usually 1,
8429 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8430 * string operations it can be up to a page.
8431 * @param iSegReg The index of the segment register to use for
8432 * this access. The base and limits are checked.
8433 * Use UINT8_MAX to indicate that no segmentation
8434 * is required (for IDT, GDT and LDT accesses).
8435 * @param GCPtrMem The address of the guest memory.
8436 * @param fAccess How the memory is being accessed. The
8437 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8438 * how to map the memory, while the
8439 * IEM_ACCESS_WHAT_XXX bit is used when raising
8440 * exceptions.
8441 */
8442IEM_STATIC VBOXSTRICTRC
8443iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8444{
8445 /*
8446 * Check the input and figure out which mapping entry to use.
8447 */
8448 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8449 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8450 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8451
8452 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8453 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8454 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8455 {
8456 iMemMap = iemMemMapFindFree(pVCpu);
8457 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8458 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8459 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8460 pVCpu->iem.s.aMemMappings[2].fAccess),
8461 VERR_IEM_IPE_9);
8462 }
8463
8464 /*
8465 * Map the memory, checking that we can actually access it. If something
8466 * slightly complicated happens, fall back on bounce buffering.
8467 */
8468 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8469 if (rcStrict != VINF_SUCCESS)
8470 return rcStrict;
8471
8472 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8473 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8474
8475 RTGCPHYS GCPhysFirst;
8476 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8477 if (rcStrict != VINF_SUCCESS)
8478 return rcStrict;
8479
8480 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8481 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8482 if (fAccess & IEM_ACCESS_TYPE_READ)
8483 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8484
8485 void *pvMem;
8486 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8487 if (rcStrict != VINF_SUCCESS)
8488 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8489
8490 /*
8491 * Fill in the mapping table entry.
8492 */
8493 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8494 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8495 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8496 pVCpu->iem.s.cActiveMappings++;
8497
8498 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8499 *ppvMem = pvMem;
8500 return VINF_SUCCESS;
8501}
8502
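/*
 * Minimal usage sketch (hypothetical helper, excluded from the build): the
 * map / use / commit-and-unmap pattern that the iemMemFetchDataUxx and
 * iemMemStoreDataUxx helpers further down are built on. All names are taken
 * from this file except the example function itself.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16Example(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t const *pu16Src;
    /* Map two bytes of guest memory for reading; this may bounce buffer internally. */
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = *pu16Src; /* Only touch the mapping between map and unmap. */
        rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
    }
    return rcStrict;
}
#endif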
8503
8504/**
8505 * Commits the guest memory if bounce buffered and unmaps it.
8506 *
8507 * @returns Strict VBox status code.
8508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8509 * @param pvMem The mapping.
8510 * @param fAccess The kind of access.
8511 */
8512IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8513{
8514 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8515 AssertReturn(iMemMap >= 0, iMemMap);
8516
8517 /* If it's bounce buffered, we may need to write back the buffer. */
8518 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8519 {
8520 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8521 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8522 }
8523 /* Otherwise unlock it. */
8524 else
8525 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8526
8527 /* Free the entry. */
8528 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8529 Assert(pVCpu->iem.s.cActiveMappings != 0);
8530 pVCpu->iem.s.cActiveMappings--;
8531 return VINF_SUCCESS;
8532}
8533
8534#ifdef IEM_WITH_SETJMP
8535
8536/**
8537 * Maps the specified guest memory for the given kind of access, longjmp on
8538 * error.
8539 *
8540 * This may be using bounce buffering of the memory if it's crossing a page
8541 * boundary or if there is an access handler installed for any of it. Because
8542 * of lock prefix guarantees, we're in for some extra clutter when this
8543 * happens.
8544 *
8545 * This may raise a \#GP, \#SS, \#PF or \#AC.
8546 *
8547 * @returns Pointer to the mapped memory.
8548 *
8549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8550 * @param cbMem The number of bytes to map. This is usually 1,
8551 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8552 * string operations it can be up to a page.
8553 * @param iSegReg The index of the segment register to use for
8554 * this access. The base and limits are checked.
8555 * Use UINT8_MAX to indicate that no segmentation
8556 * is required (for IDT, GDT and LDT accesses).
8557 * @param GCPtrMem The address of the guest memory.
8558 * @param fAccess How the memory is being accessed. The
8559 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8560 * how to map the memory, while the
8561 * IEM_ACCESS_WHAT_XXX bit is used when raising
8562 * exceptions.
8563 */
8564IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8565{
8566 /*
8567 * Check the input and figure out which mapping entry to use.
8568 */
8569 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8570 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8571 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8572
8573 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8574 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8575 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8576 {
8577 iMemMap = iemMemMapFindFree(pVCpu);
8578 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8579 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8580 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8581 pVCpu->iem.s.aMemMappings[2].fAccess),
8582 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8583 }
8584
8585 /*
8586 * Map the memory, checking that we can actually access it. If something
8587 * slightly complicated happens, fall back on bounce buffering.
8588 */
8589 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8590 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8591 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8592
8593 /* Crossing a page boundary? */
8594 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8595 { /* No (likely). */ }
8596 else
8597 {
8598 void *pvMem;
8599 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8600 if (rcStrict == VINF_SUCCESS)
8601 return pvMem;
8602 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8603 }
8604
8605 RTGCPHYS GCPhysFirst;
8606 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8607 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8608 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8609
8610 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8611 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8612 if (fAccess & IEM_ACCESS_TYPE_READ)
8613 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8614
8615 void *pvMem;
8616 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8617 if (rcStrict == VINF_SUCCESS)
8618 { /* likely */ }
8619 else
8620 {
8621 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8622 if (rcStrict == VINF_SUCCESS)
8623 return pvMem;
8624 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8625 }
8626
8627 /*
8628 * Fill in the mapping table entry.
8629 */
8630 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8631 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8632 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8633 pVCpu->iem.s.cActiveMappings++;
8634
8635 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8636 return pvMem;
8637}
8638
8639
8640/**
8641 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8642 *
8643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8644 * @param pvMem The mapping.
8645 * @param fAccess The kind of access.
8646 */
8647IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8648{
8649 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8650 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8651
8652 /* If it's bounce buffered, we may need to write back the buffer. */
8653 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8654 {
8655 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8656 {
8657 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8658 if (rcStrict == VINF_SUCCESS)
8659 return;
8660 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8661 }
8662 }
8663 /* Otherwise unlock it. */
8664 else
8665 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8666
8667 /* Free the entry. */
8668 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8669 Assert(pVCpu->iem.s.cActiveMappings != 0);
8670 pVCpu->iem.s.cActiveMappings--;
8671}
8672
8673#endif
8674
8675#ifndef IN_RING3
8676/**
8677 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8678 * buffer part shows trouble the write is postponed to ring-3 (VMCPU_FF_IEM is set).
8679 *
8680 * Allows the instruction to be completed and retired, while the IEM user will
8681 * return to ring-3 immediately afterwards and do the postponed writes there.
8682 *
8683 * @returns VBox status code (no strict statuses). Caller must check
8684 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8686 * @param pvMem The mapping.
8687 * @param fAccess The kind of access.
8688 */
8689IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8690{
8691 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8692 AssertReturn(iMemMap >= 0, iMemMap);
8693
8694 /* If it's bounce buffered, we may need to write back the buffer. */
8695 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8696 {
8697 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8698 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8699 }
8700 /* Otherwise unlock it. */
8701 else
8702 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8703
8704 /* Free the entry. */
8705 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8706 Assert(pVCpu->iem.s.cActiveMappings != 0);
8707 pVCpu->iem.s.cActiveMappings--;
8708 return VINF_SUCCESS;
8709}
8710#endif
8711
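/*
 * Illustrative caller sketch (hypothetical, not an actual call site): a
 * ring-0/raw-mode string instruction loop using the postponing variant above
 * is expected to stop and let ring-3 flush the pending bounce buffer writes.
 * The exact force flag test used by real callers is an assumption here.
 */
#if 0
    rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)) /* postponed write pending? */
        break;                                /* retire the instruction and let ring-3 do the write */
#endif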
8712
8713/**
8714 * Rolls back mappings, releasing page locks and such.
8715 *
8716 * The caller shall only call this after checking cActiveMappings.
8717 *
8719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8720 */
8721IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8722{
8723 Assert(pVCpu->iem.s.cActiveMappings > 0);
8724
8725 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8726 while (iMemMap-- > 0)
8727 {
8728 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8729 if (fAccess != IEM_ACCESS_INVALID)
8730 {
8731 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8732 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8733 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8734 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8735 Assert(pVCpu->iem.s.cActiveMappings > 0);
8736 pVCpu->iem.s.cActiveMappings--;
8737 }
8738 }
8739}
8740
8741
8742/**
8743 * Fetches a data byte.
8744 *
8745 * @returns Strict VBox status code.
8746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8747 * @param pu8Dst Where to return the byte.
8748 * @param iSegReg The index of the segment register to use for
8749 * this access. The base and limits are checked.
8750 * @param GCPtrMem The address of the guest memory.
8751 */
8752IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8753{
8754 /* The lazy approach for now... */
8755 uint8_t const *pu8Src;
8756 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8757 if (rc == VINF_SUCCESS)
8758 {
8759 *pu8Dst = *pu8Src;
8760 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8761 }
8762 return rc;
8763}
8764
8765
8766#ifdef IEM_WITH_SETJMP
8767/**
8768 * Fetches a data byte, longjmp on error.
8769 *
8770 * @returns The byte.
8771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8772 * @param iSegReg The index of the segment register to use for
8773 * this access. The base and limits are checked.
8774 * @param GCPtrMem The address of the guest memory.
8775 */
8776DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8777{
8778 /* The lazy approach for now... */
8779 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8780 uint8_t const bRet = *pu8Src;
8781 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8782 return bRet;
8783}
8784#endif /* IEM_WITH_SETJMP */
8785
8786
8787/**
8788 * Fetches a data word.
8789 *
8790 * @returns Strict VBox status code.
8791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8792 * @param pu16Dst Where to return the word.
8793 * @param iSegReg The index of the segment register to use for
8794 * this access. The base and limits are checked.
8795 * @param GCPtrMem The address of the guest memory.
8796 */
8797IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8798{
8799 /* The lazy approach for now... */
8800 uint16_t const *pu16Src;
8801 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8802 if (rc == VINF_SUCCESS)
8803 {
8804 *pu16Dst = *pu16Src;
8805 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8806 }
8807 return rc;
8808}
8809
8810
8811#ifdef IEM_WITH_SETJMP
8812/**
8813 * Fetches a data word, longjmp on error.
8814 *
8815 * @returns The word
8816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8817 * @param iSegReg The index of the segment register to use for
8818 * this access. The base and limits are checked.
8819 * @param GCPtrMem The address of the guest memory.
8820 */
8821DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8822{
8823 /* The lazy approach for now... */
8824 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8825 uint16_t const u16Ret = *pu16Src;
8826 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8827 return u16Ret;
8828}
8829#endif
8830
8831
8832/**
8833 * Fetches a data dword.
8834 *
8835 * @returns Strict VBox status code.
8836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8837 * @param pu32Dst Where to return the dword.
8838 * @param iSegReg The index of the segment register to use for
8839 * this access. The base and limits are checked.
8840 * @param GCPtrMem The address of the guest memory.
8841 */
8842IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8843{
8844 /* The lazy approach for now... */
8845 uint32_t const *pu32Src;
8846 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8847 if (rc == VINF_SUCCESS)
8848 {
8849 *pu32Dst = *pu32Src;
8850 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8851 }
8852 return rc;
8853}
8854
8855
8856#ifdef IEM_WITH_SETJMP
8857
8858IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8859{
8860 Assert(cbMem >= 1);
8861 Assert(iSegReg < X86_SREG_COUNT);
8862
8863 /*
8864 * 64-bit mode is simpler.
8865 */
8866 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8867 {
8868 if (iSegReg >= X86_SREG_FS)
8869 {
8870 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8871 GCPtrMem += pSel->u64Base;
8872 }
8873
8874 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8875 return GCPtrMem;
8876 }
8877 /*
8878 * 16-bit and 32-bit segmentation.
8879 */
8880 else
8881 {
8882 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8883 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8884 == X86DESCATTR_P /* data, expand up */
8885 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8886 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8887 {
8888 /* expand up */
8889 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8890 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8891 && GCPtrLast32 > (uint32_t)GCPtrMem))
8892 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8893 }
8894 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8895 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8896 {
8897 /* expand down */
8898 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8899 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8900 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8901 && GCPtrLast32 > (uint32_t)GCPtrMem))
8902 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8903 }
8904 else
8905 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8906 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8907 }
8908 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8909}
8910
8911
8912IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8913{
8914 Assert(cbMem >= 1);
8915 Assert(iSegReg < X86_SREG_COUNT);
8916
8917 /*
8918 * 64-bit mode is simpler.
8919 */
8920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8921 {
8922 if (iSegReg >= X86_SREG_FS)
8923 {
8924 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8925 GCPtrMem += pSel->u64Base;
8926 }
8927
8928 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8929 return GCPtrMem;
8930 }
8931 /*
8932 * 16-bit and 32-bit segmentation.
8933 */
8934 else
8935 {
8936 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8937 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8938 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8939 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8940 {
8941 /* expand up */
8942 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8943 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8944 && GCPtrLast32 > (uint32_t)GCPtrMem))
8945 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8946 }
8947 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8948 {
8949 /* expand down */
8950 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8951 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8952 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8953 && GCPtrLast32 > (uint32_t)GCPtrMem))
8954 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8955 }
8956 else
8957 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8958 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8959 }
8960 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8961}
8962
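/*
 * Illustrative note on the 64-bit path in the two helpers above: only FS and
 * GS contribute a segment base there, so e.g. a GS relative access with
 * pSel->u64Base = 0x1000 and GCPtrMem = 0x2000 yields the linear address
 * 0x3000, which is then only subject to the canonical address check; the
 * 16-bit/32-bit limit checks do not apply in that mode.
 */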
8963
8964/**
8965 * Fetches a data dword, longjmp on error, fallback/safe version.
8966 *
8967 * @returns The dword
8968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8969 * @param iSegReg The index of the segment register to use for
8970 * this access. The base and limits are checked.
8971 * @param GCPtrMem The address of the guest memory.
8972 */
8973IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8974{
8975 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8976 uint32_t const u32Ret = *pu32Src;
8977 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8978 return u32Ret;
8979}
8980
8981
8982/**
8983 * Fetches a data dword, longjmp on error.
8984 *
8985 * @returns The dword
8986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8987 * @param iSegReg The index of the segment register to use for
8988 * this access. The base and limits are checked.
8989 * @param GCPtrMem The address of the guest memory.
8990 */
8991DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8992{
8993# ifdef IEM_WITH_DATA_TLB
8994 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8995 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8996 {
8997 /// @todo more later.
8998 }
8999
9000 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9001# else
9002 /* The lazy approach. */
9003 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9004 uint32_t const u32Ret = *pu32Src;
9005 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9006 return u32Ret;
9007# endif
9008}
9009#endif
9010
9011
9012#ifdef SOME_UNUSED_FUNCTION
9013/**
9014 * Fetches a data dword and sign extends it to a qword.
9015 *
9016 * @returns Strict VBox status code.
9017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9018 * @param pu64Dst Where to return the sign extended value.
9019 * @param iSegReg The index of the segment register to use for
9020 * this access. The base and limits are checked.
9021 * @param GCPtrMem The address of the guest memory.
9022 */
9023IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9024{
9025 /* The lazy approach for now... */
9026 int32_t const *pi32Src;
9027 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9028 if (rc == VINF_SUCCESS)
9029 {
9030 *pu64Dst = *pi32Src;
9031 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9032 }
9033#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9034 else
9035 *pu64Dst = 0;
9036#endif
9037 return rc;
9038}
9039#endif
9040
9041
9042/**
9043 * Fetches a data qword.
9044 *
9045 * @returns Strict VBox status code.
9046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9047 * @param pu64Dst Where to return the qword.
9048 * @param iSegReg The index of the segment register to use for
9049 * this access. The base and limits are checked.
9050 * @param GCPtrMem The address of the guest memory.
9051 */
9052IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9053{
9054 /* The lazy approach for now... */
9055 uint64_t const *pu64Src;
9056 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9057 if (rc == VINF_SUCCESS)
9058 {
9059 *pu64Dst = *pu64Src;
9060 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9061 }
9062 return rc;
9063}
9064
9065
9066#ifdef IEM_WITH_SETJMP
9067/**
9068 * Fetches a data qword, longjmp on error.
9069 *
9070 * @returns The qword.
9071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9072 * @param iSegReg The index of the segment register to use for
9073 * this access. The base and limits are checked.
9074 * @param GCPtrMem The address of the guest memory.
9075 */
9076DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9077{
9078 /* The lazy approach for now... */
9079 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9080 uint64_t const u64Ret = *pu64Src;
9081 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9082 return u64Ret;
9083}
9084#endif
9085
9086
9087/**
9088 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9089 *
9090 * @returns Strict VBox status code.
9091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9092 * @param pu64Dst Where to return the qword.
9093 * @param iSegReg The index of the segment register to use for
9094 * this access. The base and limits are checked.
9095 * @param GCPtrMem The address of the guest memory.
9096 */
9097IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9098{
9099 /* The lazy approach for now... */
9100 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9101 if (RT_UNLIKELY(GCPtrMem & 15))
9102 return iemRaiseGeneralProtectionFault0(pVCpu);
9103
9104 uint64_t const *pu64Src;
9105 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9106 if (rc == VINF_SUCCESS)
9107 {
9108 *pu64Dst = *pu64Src;
9109 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9110 }
9111 return rc;
9112}
9113
9114
9115#ifdef IEM_WITH_SETJMP
9116/**
9117 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9118 *
9119 * @returns The qword.
9120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9121 * @param iSegReg The index of the segment register to use for
9122 * this access. The base and limits are checked.
9123 * @param GCPtrMem The address of the guest memory.
9124 */
9125DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9126{
9127 /* The lazy approach for now... */
9128 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9129 if (RT_LIKELY(!(GCPtrMem & 15)))
9130 {
9131 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9132 uint64_t const u64Ret = *pu64Src;
9133 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9134 return u64Ret;
9135 }
9136
9137 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9138 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9139}
9140#endif
9141
9142
9143/**
9144 * Fetches a data tword.
9145 *
9146 * @returns Strict VBox status code.
9147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9148 * @param pr80Dst Where to return the tword.
9149 * @param iSegReg The index of the segment register to use for
9150 * this access. The base and limits are checked.
9151 * @param GCPtrMem The address of the guest memory.
9152 */
9153IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9154{
9155 /* The lazy approach for now... */
9156 PCRTFLOAT80U pr80Src;
9157 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9158 if (rc == VINF_SUCCESS)
9159 {
9160 *pr80Dst = *pr80Src;
9161 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9162 }
9163 return rc;
9164}
9165
9166
9167#ifdef IEM_WITH_SETJMP
9168/**
9169 * Fetches a data tword, longjmp on error.
9170 *
9171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9172 * @param pr80Dst Where to return the tword.
9173 * @param iSegReg The index of the segment register to use for
9174 * this access. The base and limits are checked.
9175 * @param GCPtrMem The address of the guest memory.
9176 */
9177DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9178{
9179 /* The lazy approach for now... */
9180 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9181 *pr80Dst = *pr80Src;
9182 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9183}
9184#endif
9185
9186
9187/**
9188 * Fetches a data dqword (double qword), generally SSE related.
9189 *
9190 * @returns Strict VBox status code.
9191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9192 * @param pu128Dst Where to return the dqword.
9193 * @param iSegReg The index of the segment register to use for
9194 * this access. The base and limits are checked.
9195 * @param GCPtrMem The address of the guest memory.
9196 */
9197IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9198{
9199 /* The lazy approach for now... */
9200 PCRTUINT128U pu128Src;
9201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9202 if (rc == VINF_SUCCESS)
9203 {
9204 pu128Dst->au64[0] = pu128Src->au64[0];
9205 pu128Dst->au64[1] = pu128Src->au64[1];
9206 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9207 }
9208 return rc;
9209}
9210
9211
9212#ifdef IEM_WITH_SETJMP
9213/**
9214 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9215 *
9216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9217 * @param pu128Dst Where to return the dqword.
9218 * @param iSegReg The index of the segment register to use for
9219 * this access. The base and limits are checked.
9220 * @param GCPtrMem The address of the guest memory.
9221 */
9222IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9223{
9224 /* The lazy approach for now... */
9225 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9226 pu128Dst->au64[0] = pu128Src->au64[0];
9227 pu128Dst->au64[1] = pu128Src->au64[1];
9228 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9229}
9230#endif
9231
9232
9233/**
9234 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9235 * related.
9236 *
9237 * Raises \#GP(0) if not aligned.
9238 *
9239 * @returns Strict VBox status code.
9240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9241 * @param pu128Dst Where to return the dqword.
9242 * @param iSegReg The index of the segment register to use for
9243 * this access. The base and limits are checked.
9244 * @param GCPtrMem The address of the guest memory.
9245 */
9246IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9247{
9248 /* The lazy approach for now... */
9249 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9250 if ( (GCPtrMem & 15)
9251 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9252 return iemRaiseGeneralProtectionFault0(pVCpu);
9253
9254 PCRTUINT128U pu128Src;
9255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9256 if (rc == VINF_SUCCESS)
9257 {
9258 pu128Dst->au64[0] = pu128Src->au64[0];
9259 pu128Dst->au64[1] = pu128Src->au64[1];
9260 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9261 }
9262 return rc;
9263}
9264
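/*
 * Illustrative example for the alignment check above: with MXCSR.MM clear, a
 * 16 byte access at an address with (GCPtrMem & 15) == 8 takes the
 * iemRaiseGeneralProtectionFault0 path; with MXCSR.MM set the misaligned
 * access is mapped like any other.
 */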
9265
9266#ifdef IEM_WITH_SETJMP
9267/**
9268 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9269 * related, longjmp on error.
9270 *
9271 * Raises \#GP(0) if not aligned.
9272 *
9273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9274 * @param pu128Dst Where to return the dqword.
9275 * @param iSegReg The index of the segment register to use for
9276 * this access. The base and limits are checked.
9277 * @param GCPtrMem The address of the guest memory.
9278 */
9279DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9280{
9281 /* The lazy approach for now... */
9282 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9283 if ( (GCPtrMem & 15) == 0
9284 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9285 {
9286 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9287 pu128Dst->au64[0] = pu128Src->au64[0];
9288 pu128Dst->au64[1] = pu128Src->au64[1];
9289 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9290 return;
9291 }
9292
9293 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9294 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9295}
9296#endif
9297
9298
9299
9300/**
9301 * Fetches a descriptor register (lgdt, lidt).
9302 *
9303 * @returns Strict VBox status code.
9304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9305 * @param pcbLimit Where to return the limit.
9306 * @param pGCPtrBase Where to return the base.
9307 * @param iSegReg The index of the segment register to use for
9308 * this access. The base and limits are checked.
9309 * @param GCPtrMem The address of the guest memory.
9310 * @param enmOpSize The effective operand size.
9311 */
9312IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9313 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9314{
9315 /*
9316 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9317 * little special:
9318 * - The two reads are done separately.
9319 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9320 * - We suspect the 386 to actually commit the limit before the base in
9321 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9322 * don't try to emulate this eccentric behavior, because it's not well
9323 * enough understood and rather hard to trigger.
9324 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9325 */
9326 VBOXSTRICTRC rcStrict;
9327 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9328 {
9329 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9330 if (rcStrict == VINF_SUCCESS)
9331 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9332 }
9333 else
9334 {
9335 uint32_t uTmp = 0; /* (silences a possible Visual C++ used-uninitialized warning) */
9336 if (enmOpSize == IEMMODE_32BIT)
9337 {
9338 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9339 {
9340 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9341 if (rcStrict == VINF_SUCCESS)
9342 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9343 }
9344 else
9345 {
9346 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9347 if (rcStrict == VINF_SUCCESS)
9348 {
9349 *pcbLimit = (uint16_t)uTmp;
9350 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9351 }
9352 }
9353 if (rcStrict == VINF_SUCCESS)
9354 *pGCPtrBase = uTmp;
9355 }
9356 else
9357 {
9358 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9359 if (rcStrict == VINF_SUCCESS)
9360 {
9361 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9362 if (rcStrict == VINF_SUCCESS)
9363 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9364 }
9365 }
9366 }
9367 return rcStrict;
9368}
9369
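/*
 * Memory layout fetched by iemMemFetchDataXdtr above (matching the reads it
 * performs): a 16-bit limit at GCPtrMem + 0, followed at GCPtrMem + 2 by the
 * base: 8 bytes in 64-bit mode, 4 bytes otherwise; with a 16-bit operand size
 * the base is additionally masked down to 24 bits (UINT32_C(0x00ffffff)).
 */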
9370
9371
9372/**
9373 * Stores a data byte.
9374 *
9375 * @returns Strict VBox status code.
9376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9377 * @param iSegReg The index of the segment register to use for
9378 * this access. The base and limits are checked.
9379 * @param GCPtrMem The address of the guest memory.
9380 * @param u8Value The value to store.
9381 */
9382IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9383{
9384 /* The lazy approach for now... */
9385 uint8_t *pu8Dst;
9386 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9387 if (rc == VINF_SUCCESS)
9388 {
9389 *pu8Dst = u8Value;
9390 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9391 }
9392 return rc;
9393}
9394
9395
9396#ifdef IEM_WITH_SETJMP
9397/**
9398 * Stores a data byte, longjmp on error.
9399 *
9400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9401 * @param iSegReg The index of the segment register to use for
9402 * this access. The base and limits are checked.
9403 * @param GCPtrMem The address of the guest memory.
9404 * @param u8Value The value to store.
9405 */
9406IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9407{
9408 /* The lazy approach for now... */
9409 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9410 *pu8Dst = u8Value;
9411 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9412}
9413#endif
9414
9415
9416/**
9417 * Stores a data word.
9418 *
9419 * @returns Strict VBox status code.
9420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9421 * @param iSegReg The index of the segment register to use for
9422 * this access. The base and limits are checked.
9423 * @param GCPtrMem The address of the guest memory.
9424 * @param u16Value The value to store.
9425 */
9426IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9427{
9428 /* The lazy approach for now... */
9429 uint16_t *pu16Dst;
9430 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9431 if (rc == VINF_SUCCESS)
9432 {
9433 *pu16Dst = u16Value;
9434 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9435 }
9436 return rc;
9437}
9438
9439
9440#ifdef IEM_WITH_SETJMP
9441/**
9442 * Stores a data word, longjmp on error.
9443 *
9444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9445 * @param iSegReg The index of the segment register to use for
9446 * this access. The base and limits are checked.
9447 * @param GCPtrMem The address of the guest memory.
9448 * @param u16Value The value to store.
9449 */
9450IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9451{
9452 /* The lazy approach for now... */
9453 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9454 *pu16Dst = u16Value;
9455 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9456}
9457#endif
9458
9459
9460/**
9461 * Stores a data dword.
9462 *
9463 * @returns Strict VBox status code.
9464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9465 * @param iSegReg The index of the segment register to use for
9466 * this access. The base and limits are checked.
9467 * @param GCPtrMem The address of the guest memory.
9468 * @param u32Value The value to store.
9469 */
9470IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9471{
9472 /* The lazy approach for now... */
9473 uint32_t *pu32Dst;
9474 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9475 if (rc == VINF_SUCCESS)
9476 {
9477 *pu32Dst = u32Value;
9478 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9479 }
9480 return rc;
9481}
9482
9483
9484#ifdef IEM_WITH_SETJMP
9485/**
9486 * Stores a data dword, longjmp on error.
9487 *
9489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9490 * @param iSegReg The index of the segment register to use for
9491 * this access. The base and limits are checked.
9492 * @param GCPtrMem The address of the guest memory.
9493 * @param u32Value The value to store.
9494 */
9495IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9496{
9497 /* The lazy approach for now... */
9498 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9499 *pu32Dst = u32Value;
9500 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9501}
9502#endif
9503
9504
9505/**
9506 * Stores a data qword.
9507 *
9508 * @returns Strict VBox status code.
9509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9510 * @param iSegReg The index of the segment register to use for
9511 * this access. The base and limits are checked.
9512 * @param GCPtrMem The address of the guest memory.
9513 * @param u64Value The value to store.
9514 */
9515IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9516{
9517 /* The lazy approach for now... */
9518 uint64_t *pu64Dst;
9519 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9520 if (rc == VINF_SUCCESS)
9521 {
9522 *pu64Dst = u64Value;
9523 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9524 }
9525 return rc;
9526}
9527
9528
9529#ifdef IEM_WITH_SETJMP
9530/**
9531 * Stores a data qword, longjmp on error.
9532 *
9533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 * @param u64Value The value to store.
9538 */
9539IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9540{
9541 /* The lazy approach for now... */
9542 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9543 *pu64Dst = u64Value;
9544 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9545}
9546#endif
9547
9548
9549/**
9550 * Stores a data dqword.
9551 *
9552 * @returns Strict VBox status code.
9553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9554 * @param iSegReg The index of the segment register to use for
9555 * this access. The base and limits are checked.
9556 * @param GCPtrMem The address of the guest memory.
9557 * @param u128Value The value to store.
9558 */
9559IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9560{
9561 /* The lazy approach for now... */
9562 PRTUINT128U pu128Dst;
9563 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9564 if (rc == VINF_SUCCESS)
9565 {
9566 pu128Dst->au64[0] = u128Value.au64[0];
9567 pu128Dst->au64[1] = u128Value.au64[1];
9568 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9569 }
9570 return rc;
9571}
9572
9573
9574#ifdef IEM_WITH_SETJMP
9575/**
9576 * Stores a data dqword, longjmp on error.
9577 *
9578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9579 * @param iSegReg The index of the segment register to use for
9580 * this access. The base and limits are checked.
9581 * @param GCPtrMem The address of the guest memory.
9582 * @param u128Value The value to store.
9583 */
9584IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9585{
9586 /* The lazy approach for now... */
9587 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9588 pu128Dst->au64[0] = u128Value.au64[0];
9589 pu128Dst->au64[1] = u128Value.au64[1];
9590 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9591}
9592#endif
9593
9594
9595/**
9596 * Stores a data dqword, SSE aligned.
9597 *
9598 * @returns Strict VBox status code.
9599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9600 * @param iSegReg The index of the segment register to use for
9601 * this access. The base and limits are checked.
9602 * @param GCPtrMem The address of the guest memory.
9603 * @param u128Value The value to store.
9604 */
9605IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9606{
9607 /* The lazy approach for now... */
9608 if ( (GCPtrMem & 15)
9609 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9610 return iemRaiseGeneralProtectionFault0(pVCpu);
9611
9612 PRTUINT128U pu128Dst;
9613 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9614 if (rc == VINF_SUCCESS)
9615 {
9616 pu128Dst->au64[0] = u128Value.au64[0];
9617 pu128Dst->au64[1] = u128Value.au64[1];
9618 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9619 }
9620 return rc;
9621}
9622
9623
9624#ifdef IEM_WITH_SETJMP
9625/**
9626 * Stores a data dqword, SSE aligned, longjmp on error.
9627 *
9629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9630 * @param iSegReg The index of the segment register to use for
9631 * this access. The base and limits are checked.
9632 * @param GCPtrMem The address of the guest memory.
9633 * @param u128Value The value to store.
9634 */
9635DECL_NO_INLINE(IEM_STATIC, void)
9636iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9637{
9638 /* The lazy approach for now... */
9639 if ( (GCPtrMem & 15) == 0
9640 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9641 {
9642 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9643 pu128Dst->au64[0] = u128Value.au64[0];
9644 pu128Dst->au64[1] = u128Value.au64[1];
9645 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9646 return;
9647 }
9648
9649 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9650 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9651}
9652#endif
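/*
 * Illustrative note: both aligned-SSE store variants above implement the
 * usual rule that a 16-byte SSE data access must be 16-byte aligned unless
 * the MXCSR.MM (misaligned exception mask) bit is set, roughly
 *
 *      fRaiseGp0 = (GCPtrMem & 15) != 0
 *               && !(pXState->x87.MXCSR & X86_MXSCR_MM);
 *
 * (spelling the define as this file does; whether the check belongs before
 * or after adding the segment base is still an open @todo above).
 */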
9653
9654
9655/**
9656 * Stores a descriptor register (sgdt, sidt).
9657 *
9658 * @returns Strict VBox status code.
9659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9660 * @param cbLimit The limit.
9661 * @param GCPtrBase The base address.
9662 * @param iSegReg The index of the segment register to use for
9663 * this access. The base and limits are checked.
9664 * @param GCPtrMem The address of the guest memory.
9665 */
9666IEM_STATIC VBOXSTRICTRC
9667iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9668{
9669 /*
9670 * The SIDT and SGDT instructions actually store the data using two
9671 * independent writes. The instructions do not respond to opsize prefixes.
9672 */
9673 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9674 if (rcStrict == VINF_SUCCESS)
9675 {
9676 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9677 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9678 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9679 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9680 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9681 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9682 else
9683 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9684 }
9685 return rcStrict;
9686}
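/*
 * Illustrative note: the two writes above produce the architectural sgdt/
 * sidt image, a 16-bit limit at offset 0 followed by a 32-bit base at
 * offset 2 (a 64-bit base in long mode).  For IEMTARGETCPU_286 and older
 * targets the top byte of the 32-bit base is stored as 0xFF, which is what
 * real 286s are reported to put in the base byte they do not implement.
 */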
9687
9688
9689/**
9690 * Pushes a word onto the stack.
9691 *
9692 * @returns Strict VBox status code.
9693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9694 * @param u16Value The value to push.
9695 */
9696IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9697{
9698 /* Decrement the stack pointer. */
9699 uint64_t uNewRsp;
9700 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9701 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9702
9703 /* Write the word the lazy way. */
9704 uint16_t *pu16Dst;
9705 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9706 if (rc == VINF_SUCCESS)
9707 {
9708 *pu16Dst = u16Value;
9709 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9710 }
9711
9712 /* Commit the new RSP value unless an access handler made trouble. */
9713 if (rc == VINF_SUCCESS)
9714 pCtx->rsp = uNewRsp;
9715
9716 return rc;
9717}
9718
9719
9720/**
9721 * Pushes a dword onto the stack.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9725 * @param u32Value The value to push.
9726 */
9727IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9728{
9729 /* Decrement the stack pointer. */
9730 uint64_t uNewRsp;
9731 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9732 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9733
9734 /* Write the dword the lazy way. */
9735 uint32_t *pu32Dst;
9736 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9737 if (rc == VINF_SUCCESS)
9738 {
9739 *pu32Dst = u32Value;
9740 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9741 }
9742
9743 /* Commit the new RSP value unless an access handler made trouble. */
9744 if (rc == VINF_SUCCESS)
9745 pCtx->rsp = uNewRsp;
9746
9747 return rc;
9748}
9749
9750
9751/**
9752 * Pushes a dword segment register value onto the stack.
9753 *
9754 * @returns Strict VBox status code.
9755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9756 * @param u32Value The value to push.
9757 */
9758IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9759{
9760 /* Decrement the stack pointer. */
9761 uint64_t uNewRsp;
9762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9763 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9764
9765 VBOXSTRICTRC rc;
9766 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9767 {
9768 /* The recompiler writes a full dword. */
9769 uint32_t *pu32Dst;
9770 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9771 if (rc == VINF_SUCCESS)
9772 {
9773 *pu32Dst = u32Value;
9774 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9775 }
9776 }
9777 else
9778 {
9779 /* The Intel docs talk about zero extending the selector register
9780 value. My actual Intel CPU here might be zero extending the value,
9781 but it still only writes the lower word... */
9782 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9783 * happens when crossing a page boundary: is the high word checked
9784 * for write accessibility or not? Probably it is. What about segment limits?
9785 * It appears this behavior is also shared with trap error codes.
9786 *
9787 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
9788 * ancient hardware to find out when it actually did change. */
9789 uint16_t *pu16Dst;
9790 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9791 if (rc == VINF_SUCCESS)
9792 {
9793 *pu16Dst = (uint16_t)u32Value;
9794 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9795 }
9796 }
9797
9798 /* Commit the new RSP value unless an access handler made trouble. */
9799 if (rc == VINF_SUCCESS)
9800 pCtx->rsp = uNewRsp;
9801
9802 return rc;
9803}
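/*
 * Illustrative note on the non-REM path above: for a 32-bit "push sreg" the
 * stack slot is mapped as a full dword for read-write, but only the low word
 * is overwritten, so the upper word keeps whatever was on the stack:
 *
 *      before:  xx xx xx xx      (old stack contents at the new ESP)
 *      after:   SS SS xx xx      (only the 16-bit selector was stored)
 *
 * Mapping the dword with IEM_ACCESS_STACK_RW rather than _W is presumably
 * what keeps the untouched upper word intact across a bounce buffer.
 */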
9804
9805
9806/**
9807 * Pushes a qword onto the stack.
9808 *
9809 * @returns Strict VBox status code.
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param u64Value The value to push.
9812 */
9813IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9814{
9815 /* Decrement the stack pointer. */
9816 uint64_t uNewRsp;
9817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9818 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9819
9820 /* Write the qword the lazy way. */
9821 uint64_t *pu64Dst;
9822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9823 if (rc == VINF_SUCCESS)
9824 {
9825 *pu64Dst = u64Value;
9826 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9827 }
9828
9829 /* Commit the new RSP value unless an access handler made trouble. */
9830 if (rc == VINF_SUCCESS)
9831 pCtx->rsp = uNewRsp;
9832
9833 return rc;
9834}
9835
9836
9837/**
9838 * Pops a word from the stack.
9839 *
9840 * @returns Strict VBox status code.
9841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9842 * @param pu16Value Where to store the popped value.
9843 */
9844IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9845{
9846 /* Increment the stack pointer. */
9847 uint64_t uNewRsp;
9848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9849 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9850
9851 /* Read the word the lazy way. */
9852 uint16_t const *pu16Src;
9853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9854 if (rc == VINF_SUCCESS)
9855 {
9856 *pu16Value = *pu16Src;
9857 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9858
9859 /* Commit the new RSP value. */
9860 if (rc == VINF_SUCCESS)
9861 pCtx->rsp = uNewRsp;
9862 }
9863
9864 return rc;
9865}
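/*
 * Illustrative note on ordering: the pop helpers only commit the new RSP
 * after the read has been unmapped successfully, so a #SS/#PF on the stack
 * access leaves RSP unchanged.  A hypothetical caller therefore only needs
 * to forward the status code:
 *
 *      uint16_t u16Val;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU16(pVCpu, &u16Val);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;  // RSP was not modified.
 */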
9866
9867
9868/**
9869 * Pops a dword from the stack.
9870 *
9871 * @returns Strict VBox status code.
9872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9873 * @param pu32Value Where to store the popped value.
9874 */
9875IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9876{
9877 /* Increment the stack pointer. */
9878 uint64_t uNewRsp;
9879 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9880 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9881
9882 /* Read the dword the lazy way. */
9883 uint32_t const *pu32Src;
9884 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9885 if (rc == VINF_SUCCESS)
9886 {
9887 *pu32Value = *pu32Src;
9888 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9889
9890 /* Commit the new RSP value. */
9891 if (rc == VINF_SUCCESS)
9892 pCtx->rsp = uNewRsp;
9893 }
9894
9895 return rc;
9896}
9897
9898
9899/**
9900 * Pops a qword from the stack.
9901 *
9902 * @returns Strict VBox status code.
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param pu64Value Where to store the popped value.
9905 */
9906IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9907{
9908 /* Increment the stack pointer. */
9909 uint64_t uNewRsp;
9910 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9911 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9912
9913 /* Read the qword the lazy way. */
9914 uint64_t const *pu64Src;
9915 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9916 if (rc == VINF_SUCCESS)
9917 {
9918 *pu64Value = *pu64Src;
9919 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9920
9921 /* Commit the new RSP value. */
9922 if (rc == VINF_SUCCESS)
9923 pCtx->rsp = uNewRsp;
9924 }
9925
9926 return rc;
9927}
9928
9929
9930/**
9931 * Pushes a word onto the stack, using a temporary stack pointer.
9932 *
9933 * @returns Strict VBox status code.
9934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9935 * @param u16Value The value to push.
9936 * @param pTmpRsp Pointer to the temporary stack pointer.
9937 */
9938IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9939{
9940 /* Decrement the stack pointer. */
9941 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9942 RTUINT64U NewRsp = *pTmpRsp;
9943 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9944
9945 /* Write the word the lazy way. */
9946 uint16_t *pu16Dst;
9947 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9948 if (rc == VINF_SUCCESS)
9949 {
9950 *pu16Dst = u16Value;
9951 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9952 }
9953
9954 /* Commit the new RSP value unless an access handler made trouble. */
9955 if (rc == VINF_SUCCESS)
9956 *pTmpRsp = NewRsp;
9957
9958 return rc;
9959}
9960
9961
9962/**
9963 * Pushes a dword onto the stack, using a temporary stack pointer.
9964 *
9965 * @returns Strict VBox status code.
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param u32Value The value to push.
9968 * @param pTmpRsp Pointer to the temporary stack pointer.
9969 */
9970IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9971{
9972 /* Decrement the stack pointer. */
9973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9974 RTUINT64U NewRsp = *pTmpRsp;
9975 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9976
9977 /* Write the dword the lazy way. */
9978 uint32_t *pu32Dst;
9979 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9980 if (rc == VINF_SUCCESS)
9981 {
9982 *pu32Dst = u32Value;
9983 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9984 }
9985
9986 /* Commit the new RSP value unless an access handler made trouble. */
9987 if (rc == VINF_SUCCESS)
9988 *pTmpRsp = NewRsp;
9989
9990 return rc;
9991}
9992
9993
9994/**
9995 * Pushes a dword onto the stack, using a temporary stack pointer.
9996 *
9997 * @returns Strict VBox status code.
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param u64Value The value to push.
10000 * @param pTmpRsp Pointer to the temporary stack pointer.
10001 */
10002IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10003{
10004 /* Decrement the stack pointer. */
10005 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10006 RTUINT64U NewRsp = *pTmpRsp;
10007 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10008
10009 /* Write the qword the lazy way. */
10010 uint64_t *pu64Dst;
10011 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10012 if (rc == VINF_SUCCESS)
10013 {
10014 *pu64Dst = u64Value;
10015 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10016 }
10017
10018 /* Commit the new RSP value unless an access handler made trouble. */
10019 if (rc == VINF_SUCCESS)
10020 *pTmpRsp = NewRsp;
10021
10022 return rc;
10023}
10024
10025
10026/**
10027 * Pops a word from the stack, using a temporary stack pointer.
10028 *
10029 * @returns Strict VBox status code.
10030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10031 * @param pu16Value Where to store the popped value.
10032 * @param pTmpRsp Pointer to the temporary stack pointer.
10033 */
10034IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10035{
10036 /* Increment the stack pointer. */
10037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10038 RTUINT64U NewRsp = *pTmpRsp;
10039 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10040
10041 /* Read the word the lazy way. */
10042 uint16_t const *pu16Src;
10043 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10044 if (rc == VINF_SUCCESS)
10045 {
10046 *pu16Value = *pu16Src;
10047 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10048
10049 /* Commit the new RSP value. */
10050 if (rc == VINF_SUCCESS)
10051 *pTmpRsp = NewRsp;
10052 }
10053
10054 return rc;
10055}
10056
10057
10058/**
10059 * Pops a dword from the stack, using a temporary stack pointer.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param pu32Value Where to store the popped value.
10064 * @param pTmpRsp Pointer to the temporary stack pointer.
10065 */
10066IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10067{
10068 /* Increment the stack pointer. */
10069 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10070 RTUINT64U NewRsp = *pTmpRsp;
10071 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10072
10073 /* Read the dword the lazy way. */
10074 uint32_t const *pu32Src;
10075 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10076 if (rc == VINF_SUCCESS)
10077 {
10078 *pu32Value = *pu32Src;
10079 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10080
10081 /* Commit the new RSP value. */
10082 if (rc == VINF_SUCCESS)
10083 *pTmpRsp = NewRsp;
10084 }
10085
10086 return rc;
10087}
10088
10089
10090/**
10091 * Pops a qword from the stack, using a temporary stack pointer.
10092 *
10093 * @returns Strict VBox status code.
10094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10095 * @param pu64Value Where to store the popped value.
10096 * @param pTmpRsp Pointer to the temporary stack pointer.
10097 */
10098IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10099{
10100 /* Increment the stack pointer. */
10101 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10102 RTUINT64U NewRsp = *pTmpRsp;
10103 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10104
10105 /* Read the qword the lazy way. */
10106 uint64_t const *pu64Src;
10107 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10108 if (rcStrict == VINF_SUCCESS)
10109 {
10110 *pu64Value = *pu64Src;
10111 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10112
10113 /* Commit the new RSP value. */
10114 if (rcStrict == VINF_SUCCESS)
10115 *pTmpRsp = NewRsp;
10116 }
10117
10118 return rcStrict;
10119}
10120
10121
10122/**
10123 * Begin a special stack push (used by interrupts, exceptions and such).
10124 *
10125 * This will raise \#SS or \#PF if appropriate.
10126 *
10127 * @returns Strict VBox status code.
10128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10129 * @param cbMem The number of bytes to push onto the stack.
10130 * @param ppvMem Where to return the pointer to the stack memory.
10131 * As with the other memory functions this could be
10132 * direct access or bounce buffered access, so
10133 * don't commit registers until the commit call
10134 * succeeds.
10135 * @param puNewRsp Where to return the new RSP value. This must be
10136 * passed unchanged to
10137 * iemMemStackPushCommitSpecial().
10138 */
10139IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10140{
10141 Assert(cbMem < UINT8_MAX);
10142 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10143 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10144 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10145}
10146
10147
10148/**
10149 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10150 *
10151 * This will update the rSP.
10152 *
10153 * @returns Strict VBox status code.
10154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10155 * @param pvMem The pointer returned by
10156 * iemMemStackPushBeginSpecial().
10157 * @param uNewRsp The new RSP value returned by
10158 * iemMemStackPushBeginSpecial().
10159 */
10160IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10161{
10162 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10163 if (rcStrict == VINF_SUCCESS)
10164 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10165 return rcStrict;
10166}
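/*
 * Illustrative usage sketch (hypothetical and simplified, loosely modelled
 * on the exception delivery code): the begin/commit pair lets a caller build
 * a whole stack frame through one mapping before RSP is touched:
 *
 *      uint64_t     uNewRsp;
 *      uint32_t    *pu32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 12, (void **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu32Frame[2] = uEfl;     // hypothetical values to push
 *      pu32Frame[1] = uSelCs;
 *      pu32Frame[0] = uEip;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu32Frame, uNewRsp);
 */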
10167
10168
10169/**
10170 * Begin a special stack pop (used by iret, retf and such).
10171 *
10172 * This will raise \#SS or \#PF if appropriate.
10173 *
10174 * @returns Strict VBox status code.
10175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10176 * @param cbMem The number of bytes to pop from the stack.
10177 * @param ppvMem Where to return the pointer to the stack memory.
10178 * @param puNewRsp Where to return the new RSP value. This must be
10179 * assigned to CPUMCTX::rsp manually some time
10180 * after iemMemStackPopDoneSpecial() has been
10181 * called.
10182 */
10183IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10184{
10185 Assert(cbMem < UINT8_MAX);
10186 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10187 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10188 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10189}
10190
10191
10192/**
10193 * Continue a special stack pop (used by iret and retf).
10194 *
10195 * This will raise \#SS or \#PF if appropriate.
10196 *
10197 * @returns Strict VBox status code.
10198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10199 * @param cbMem The number of bytes to pop from the stack.
10200 * @param ppvMem Where to return the pointer to the stack memory.
10201 * @param puNewRsp Where to return the new RSP value. This must be
10202 * assigned to CPUMCTX::rsp manually some time
10203 * after iemMemStackPopDoneSpecial() has been
10204 * called.
10205 */
10206IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10207{
10208 Assert(cbMem < UINT8_MAX);
10209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10210 RTUINT64U NewRsp;
10211 NewRsp.u = *puNewRsp;
10212 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10213 *puNewRsp = NewRsp.u;
10214 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10215}
10216
10217
10218/**
10219 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10220 * iemMemStackPopContinueSpecial).
10221 *
10222 * The caller will manually commit the rSP.
10223 *
10224 * @returns Strict VBox status code.
10225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10226 * @param pvMem The pointer returned by
10227 * iemMemStackPopBeginSpecial() or
10228 * iemMemStackPopContinueSpecial().
10229 */
10230IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10231{
10232 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10233}
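/*
 * Illustrative note: the special pop protocol is Begin -> (optionally
 * Continue for a second chunk of the frame) -> Done, with the caller writing
 * the returned RSP value to CPUMCTX::rsp itself once everything has been
 * validated, e.g. (hypothetical, simplified):
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pu32Frame;
 *      rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *      ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
 *      ...
 *      pCtx->rsp = uNewRsp;    // committed manually by the caller.
 *
 * This way a fault while fetching or validating an iret/retf frame leaves
 * the guest RSP untouched.
 */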
10234
10235
10236/**
10237 * Fetches a system table byte.
10238 *
10239 * @returns Strict VBox status code.
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param pbDst Where to return the byte.
10242 * @param iSegReg The index of the segment register to use for
10243 * this access. The base and limits are checked.
10244 * @param GCPtrMem The address of the guest memory.
10245 */
10246IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10247{
10248 /* The lazy approach for now... */
10249 uint8_t const *pbSrc;
10250 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10251 if (rc == VINF_SUCCESS)
10252 {
10253 *pbDst = *pbSrc;
10254 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10255 }
10256 return rc;
10257}
10258
10259
10260/**
10261 * Fetches a system table word.
10262 *
10263 * @returns Strict VBox status code.
10264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10265 * @param pu16Dst Where to return the word.
10266 * @param iSegReg The index of the segment register to use for
10267 * this access. The base and limits are checked.
10268 * @param GCPtrMem The address of the guest memory.
10269 */
10270IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10271{
10272 /* The lazy approach for now... */
10273 uint16_t const *pu16Src;
10274 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10275 if (rc == VINF_SUCCESS)
10276 {
10277 *pu16Dst = *pu16Src;
10278 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10279 }
10280 return rc;
10281}
10282
10283
10284/**
10285 * Fetches a system table dword.
10286 *
10287 * @returns Strict VBox status code.
10288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10289 * @param pu32Dst Where to return the dword.
10290 * @param iSegReg The index of the segment register to use for
10291 * this access. The base and limits are checked.
10292 * @param GCPtrMem The address of the guest memory.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10295{
10296 /* The lazy approach for now... */
10297 uint32_t const *pu32Src;
10298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10299 if (rc == VINF_SUCCESS)
10300 {
10301 *pu32Dst = *pu32Src;
10302 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10303 }
10304 return rc;
10305}
10306
10307
10308/**
10309 * Fetches a system table qword.
10310 *
10311 * @returns Strict VBox status code.
10312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10313 * @param pu64Dst Where to return the qword.
10314 * @param iSegReg The index of the segment register to use for
10315 * this access. The base and limits are checked.
10316 * @param GCPtrMem The address of the guest memory.
10317 */
10318IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10319{
10320 /* The lazy approach for now... */
10321 uint64_t const *pu64Src;
10322 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10323 if (rc == VINF_SUCCESS)
10324 {
10325 *pu64Dst = *pu64Src;
10326 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10327 }
10328 return rc;
10329}
10330
10331
10332/**
10333 * Fetches a descriptor table entry with caller specified error code.
10334 *
10335 * @returns Strict VBox status code.
10336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10337 * @param pDesc Where to return the descriptor table entry.
10338 * @param uSel The selector which table entry to fetch.
10339 * @param uXcpt The exception to raise on table lookup error.
10340 * @param uErrorCode The error code associated with the exception.
10341 */
10342IEM_STATIC VBOXSTRICTRC
10343iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10344{
10345 AssertPtr(pDesc);
10346 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10347
10348 /** @todo did the 286 require all 8 bytes to be accessible? */
10349 /*
10350 * Get the selector table base and check bounds.
10351 */
10352 RTGCPTR GCPtrBase;
10353 if (uSel & X86_SEL_LDT)
10354 {
10355 if ( !pCtx->ldtr.Attr.n.u1Present
10356 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10357 {
10358 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10359 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10360 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10361 uErrorCode, 0);
10362 }
10363
10364 Assert(pCtx->ldtr.Attr.n.u1Present);
10365 GCPtrBase = pCtx->ldtr.u64Base;
10366 }
10367 else
10368 {
10369 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10370 {
10371 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10372 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10373 uErrorCode, 0);
10374 }
10375 GCPtrBase = pCtx->gdtr.pGdt;
10376 }
10377
10378 /*
10379 * Read the legacy descriptor and maybe the long mode extensions if
10380 * required.
10381 */
10382 VBOXSTRICTRC rcStrict;
10383 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10384 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10385 else
10386 {
10387 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10388 if (rcStrict == VINF_SUCCESS)
10389 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10390 if (rcStrict == VINF_SUCCESS)
10391 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10392 if (rcStrict == VINF_SUCCESS)
10393 pDesc->Legacy.au16[3] = 0;
10394 else
10395 return rcStrict;
10396 }
10397
10398 if (rcStrict == VINF_SUCCESS)
10399 {
10400 if ( !IEM_IS_LONG_MODE(pVCpu)
10401 || pDesc->Legacy.Gen.u1DescType)
10402 pDesc->Long.au64[1] = 0;
10403 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10404 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10405 else
10406 {
10407 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10408 /** @todo is this the right exception? */
10409 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10410 }
10411 }
10412 return rcStrict;
10413}
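/*
 * Illustrative note on the selector arithmetic above: a selector breaks down
 * as
 *
 *      bits 3..15   descriptor table index      (uSel & X86_SEL_MASK)
 *      bit  2       table indicator, 1 = LDT    (uSel & X86_SEL_LDT)
 *      bits 0..1    RPL
 *
 * so "(uSel | X86_SEL_RPL_LDT) > limit" asks whether the last byte of the
 * 8-byte entry (index * 8 + 7) lies beyond the table limit, and the second
 * bounds check makes sure a 16-byte long mode system descriptor fits before
 * the high half is fetched at +8.
 */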
10414
10415
10416/**
10417 * Fetches a descriptor table entry.
10418 *
10419 * @returns Strict VBox status code.
10420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10421 * @param pDesc Where to return the descriptor table entry.
10422 * @param uSel The selector which table entry to fetch.
10423 * @param uXcpt The exception to raise on table lookup error.
10424 */
10425IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10426{
10427 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10428}
10429
10430
10431/**
10432 * Fakes a long mode stack selector for SS = 0.
10433 *
10434 * @param pDescSs Where to return the fake stack descriptor.
10435 * @param uDpl The DPL we want.
10436 */
10437IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10438{
10439 pDescSs->Long.au64[0] = 0;
10440 pDescSs->Long.au64[1] = 0;
10441 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10442 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10443 pDescSs->Long.Gen.u2Dpl = uDpl;
10444 pDescSs->Long.Gen.u1Present = 1;
10445 pDescSs->Long.Gen.u1Long = 1;
10446}
10447
10448
10449/**
10450 * Marks the selector descriptor as accessed (only non-system descriptors).
10451 *
10452 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10453 * will therefore skip the limit checks.
10454 *
10455 * @returns Strict VBox status code.
10456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10457 * @param uSel The selector.
10458 */
10459IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10460{
10461 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10462
10463 /*
10464 * Get the selector table base and calculate the entry address.
10465 */
10466 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10467 ? pCtx->ldtr.u64Base
10468 : pCtx->gdtr.pGdt;
10469 GCPtr += uSel & X86_SEL_MASK;
10470
10471 /*
10472 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10473 * ugly stuff to avoid this. This will make sure it's an atomic access
10474 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10475 */
10476 VBOXSTRICTRC rcStrict;
10477 uint32_t volatile *pu32;
10478 if ((GCPtr & 3) == 0)
10479 {
10480 /* The normal case, map the dword containing the accessed bit (bit 40). */
10481 GCPtr += 2 + 2;
10482 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10483 if (rcStrict != VINF_SUCCESS)
10484 return rcStrict;
10485 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10486 }
10487 else
10488 {
10489 /* The misaligned GDT/LDT case, map the whole thing. */
10490 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10491 if (rcStrict != VINF_SUCCESS)
10492 return rcStrict;
10493 switch ((uintptr_t)pu32 & 3)
10494 {
10495 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10496 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10497 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10498 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10499 }
10500 }
10501
10502 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10503}
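/*
 * Illustrative note on the bit arithmetic above: X86_SEL_TYPE_ACCESSED is
 * bit 40 of the 8-byte descriptor (bit 0 of the type byte at offset 5).  In
 * the aligned case bytes 4..7 are mapped, so the bit sits at 40 - 32 = 8
 * within that dword.  In the misaligned case the byte pointer is advanced
 * until it is dword aligned again and the bit position drops by 8 for every
 * byte skipped, which is what the "40 + 0 - N" expressions in the switch
 * encode.
 */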
10504
10505/** @} */
10506
10507
10508/*
10509 * Include the C/C++ implementation of instruction.
10510 */
10511#include "IEMAllCImpl.cpp.h"
10512
10513
10514
10515/** @name "Microcode" macros.
10516 *
10517 * The idea is that we should be able to use the same code to interpret
10518 * instructions as well as to recompile them. Thus this obfuscation.
10519 *
10520 * @{
10521 */
10522#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10523#define IEM_MC_END() }
10524#define IEM_MC_PAUSE() do {} while (0)
10525#define IEM_MC_CONTINUE() do {} while (0)
10526
10527/** Internal macro. */
10528#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10529 do \
10530 { \
10531 VBOXSTRICTRC rcStrict2 = a_Expr; \
10532 if (rcStrict2 != VINF_SUCCESS) \
10533 return rcStrict2; \
10534 } while (0)
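/*
 * Illustrative sketch (hypothetical, modelled on the instruction decoder
 * files that include this one): an instruction body written with these
 * macros reads like straight-line C when interpreting, e.g. something along
 * the lines of
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * while a recompiler could give the same macros an entirely different
 * expansion, which is the point of the obfuscation.
 */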
10535
10536
10537#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10538#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10539#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10540#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10541#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10542#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10543#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10544#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10545#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10546 do { \
10547 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10548 return iemRaiseDeviceNotAvailable(pVCpu); \
10549 } while (0)
10550#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10551 do { \
10552 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10553 return iemRaiseDeviceNotAvailable(pVCpu); \
10554 } while (0)
10555#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10556 do { \
10557 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10558 return iemRaiseMathFault(pVCpu); \
10559 } while (0)
10560#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10561 do { \
10562 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10563 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10564 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10565 return iemRaiseUndefinedOpcode(pVCpu); \
10566 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10567 return iemRaiseDeviceNotAvailable(pVCpu); \
10568 } while (0)
10569#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10570 do { \
10571 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10572 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10573 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10574 return iemRaiseUndefinedOpcode(pVCpu); \
10575 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10576 return iemRaiseDeviceNotAvailable(pVCpu); \
10577 } while (0)
10578#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10579 do { \
10580 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10581 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10582 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10583 return iemRaiseUndefinedOpcode(pVCpu); \
10584 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10585 return iemRaiseDeviceNotAvailable(pVCpu); \
10586 } while (0)
10587#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10588 do { \
10589 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10590 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10591 return iemRaiseUndefinedOpcode(pVCpu); \
10592 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10593 return iemRaiseDeviceNotAvailable(pVCpu); \
10594 } while (0)
10595#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10596 do { \
10597 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10598 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10599 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10600 return iemRaiseUndefinedOpcode(pVCpu); \
10601 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10602 return iemRaiseDeviceNotAvailable(pVCpu); \
10603 } while (0)
10604#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10605 do { \
10606 if (pVCpu->iem.s.uCpl != 0) \
10607 return iemRaiseGeneralProtectionFault0(pVCpu); \
10608 } while (0)
10609#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10610 do { \
10611 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10612 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10613 } while (0)
10614
10615
10616#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10617#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10618#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10619#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10620#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10621#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10622#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10623 uint32_t a_Name; \
10624 uint32_t *a_pName = &a_Name
10625#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10626 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10627
10628#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10629#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10630
10631#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10632#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10633#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10634#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10635#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10636#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10637#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10638#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10639#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10640#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10641#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10642#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10643#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10644#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10645#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10646#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10647#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10648#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10649#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10650#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10651#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10652#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10653#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10654#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10655#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10656#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10657#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10658#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10659#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10660/** @note Not for IOPL or IF testing or modification. */
10661#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10662#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10663#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10664#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10665
10666#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10667#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10668#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10669#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10670#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10671#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10672#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10673#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10674#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10675#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10676#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10677 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10678
10679#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10680#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10681/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10682 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10683#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10684#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10685/** @note Not for IOPL or IF testing or modification. */
10686#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10687
10688#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10689#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10690#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10691 do { \
10692 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10693 *pu32Reg += (a_u32Value); \
10694 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10695 } while (0)
10696#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10697
10698#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10699#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10700#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10701 do { \
10702 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10703 *pu32Reg -= (a_u32Value); \
10704 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10705 } while (0)
10706#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10707#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10708
10709#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10710#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10711#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10712#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10713#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10714#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10715#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10716
10717#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10718#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10719#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10720#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10721
10722#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10723#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10724#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10725
10726#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10727#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10728#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10729
10730#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10731#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10732#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10733
10734#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10735#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10736#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10737
10738#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10739
10740#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10741
10742#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10743#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10744#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10745 do { \
10746 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10747 *pu32Reg &= (a_u32Value); \
10748 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10749 } while (0)
10750#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10751
10752#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10753#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10754#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10755 do { \
10756 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10757 *pu32Reg |= (a_u32Value); \
10758 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10759 } while (0)
10760#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
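/*
 * Illustrative note: the "pu32Reg[1] = 0" statements above mirror the AMD64
 * rule that writing a 32-bit general purpose register implicitly zeroes the
 * upper half of the 64-bit register.  iemGRegRefU32 returns a pointer into
 * the 64-bit register, so the high dword has to be cleared by hand, e.g.
 *
 *      uint32_t *pu32Reg = iemGRegRefU32(pVCpu, X86_GREG_xAX);
 *      *pu32Reg   = UINT32_C(0x12345678);  // low dword
 *      pu32Reg[1] = 0;                     // high dword, like "mov eax, imm32" would
 */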
10761
10762
10763/** @note Not for IOPL or IF modification. */
10764#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10765/** @note Not for IOPL or IF modification. */
10766#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10767/** @note Not for IOPL or IF modification. */
10768#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10769
10770#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10771
10772
10773#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10774 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10775#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10776 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10777#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10778 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10779#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10780 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10781#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10782 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10783#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10784 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10785#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10786 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
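
/* Added commentary: the MREG macros above go through the x87 register array
 * (x87.aRegs[].mmx) because the MMX registers architecturally alias the low
 * 64 bits of the ST(i) registers in the FXSAVE/XSAVE image; the XREG macros
 * below use the separate x87.aXMM[] array of that same image. */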
10787
10788#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10789 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
10790 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
10791 } while (0)
10792#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10793 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10794#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10795 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10796#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
10797 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
10798#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10799 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
10800 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
10801 } while (0)
10802#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10803 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10804#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10805 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10806 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10807 } while (0)
10808#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
10809 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
10810#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10811 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10812 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10813 } while (0)
10814#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10815 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
10816#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10817 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
10818#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10819 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10820#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10821 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
10822 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
10823 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
10824 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
10825 } while (0)
10826
10827#ifndef IEM_WITH_SETJMP
10828# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10830# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10832# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10834#else
10835# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10836 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10837# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10838 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10839# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10840 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10841#endif
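
/* Added commentary: each IEM_MC_FETCH_MEM_* / IEM_MC_STORE_MEM_* macro comes
 * in two shapes.  Without IEM_WITH_SETJMP the memory worker returns a strict
 * status code and IEM_MC_RETURN_ON_FAILURE() bails out of the generated
 * function; with IEM_WITH_SETJMP the *Jmp worker longjmps on failure, so the
 * macro can be a plain expression.  A hedged sketch of what one fetch
 * effectively becomes in the two modes:
 *
 *     // status-code mode (roughly what IEM_MC_RETURN_ON_FAILURE expands to):
 *     VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pVCpu, &u8Dst, iSeg, GCPtrMem);
 *     if (rcStrict2 != VINF_SUCCESS)
 *         return rcStrict2;
 *
 *     // setjmp mode: failure is reported via longjmp, so it is an expression:
 *     u8Dst = iemMemFetchDataU8Jmp(pVCpu, iSeg, GCPtrMem);
 */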
10842
10843#ifndef IEM_WITH_SETJMP
10844# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10846# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10847 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10848# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10849 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10850#else
10851# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10852 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10853# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10854 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10855# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10856 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10857#endif
10858
10859#ifndef IEM_WITH_SETJMP
10860# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10861 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10862# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10864# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10865 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10866#else
10867# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10868 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10869# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10870 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10871# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10872 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10873#endif
10874
10875#ifdef SOME_UNUSED_FUNCTION
10876# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10877 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10878#endif
10879
10880#ifndef IEM_WITH_SETJMP
10881# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10882 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10883# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10884 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10885# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10887# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10888 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10889#else
10890# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10891 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10892# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10893 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10894# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10895 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10896# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10897 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10898#endif
10899
10900#ifndef IEM_WITH_SETJMP
10901# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10902 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10903# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10904 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10905# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10906 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10907#else
10908# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10909 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10910# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10911 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10912# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10913 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10914#endif
10915
10916#ifndef IEM_WITH_SETJMP
10917# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10918 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10919# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10920 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10921#else
10922# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10923 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10924# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10925 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10926#endif
10927
10928
10929
10930#ifndef IEM_WITH_SETJMP
10931# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10932 do { \
10933 uint8_t u8Tmp; \
10934 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10935 (a_u16Dst) = u8Tmp; \
10936 } while (0)
10937# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10938 do { \
10939 uint8_t u8Tmp; \
10940 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10941 (a_u32Dst) = u8Tmp; \
10942 } while (0)
10943# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10944 do { \
10945 uint8_t u8Tmp; \
10946 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10947 (a_u64Dst) = u8Tmp; \
10948 } while (0)
10949# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10950 do { \
10951 uint16_t u16Tmp; \
10952 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10953 (a_u32Dst) = u16Tmp; \
10954 } while (0)
10955# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10956 do { \
10957 uint16_t u16Tmp; \
10958 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10959 (a_u64Dst) = u16Tmp; \
10960 } while (0)
10961# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10962 do { \
10963 uint32_t u32Tmp; \
10964 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10965 (a_u64Dst) = u32Tmp; \
10966 } while (0)
10967#else /* IEM_WITH_SETJMP */
10968# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10969 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10970# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10971 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10972# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10973 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10974# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10975 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10976# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10977 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10978# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10979 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10980#endif /* IEM_WITH_SETJMP */
10981
10982#ifndef IEM_WITH_SETJMP
10983# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10984 do { \
10985 uint8_t u8Tmp; \
10986 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10987 (a_u16Dst) = (int8_t)u8Tmp; \
10988 } while (0)
10989# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10990 do { \
10991 uint8_t u8Tmp; \
10992 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10993 (a_u32Dst) = (int8_t)u8Tmp; \
10994 } while (0)
10995# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10996 do { \
10997 uint8_t u8Tmp; \
10998 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10999 (a_u64Dst) = (int8_t)u8Tmp; \
11000 } while (0)
11001# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11002 do { \
11003 uint16_t u16Tmp; \
11004 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11005 (a_u32Dst) = (int16_t)u16Tmp; \
11006 } while (0)
11007# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11008 do { \
11009 uint16_t u16Tmp; \
11010 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11011 (a_u64Dst) = (int16_t)u16Tmp; \
11012 } while (0)
11013# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11014 do { \
11015 uint32_t u32Tmp; \
11016 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11017 (a_u64Dst) = (int32_t)u32Tmp; \
11018 } while (0)
11019#else /* IEM_WITH_SETJMP */
11020# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11021 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11022# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11023 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11024# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11025 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11026# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11027 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11028# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11029 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11030# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11031 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11032#endif /* IEM_WITH_SETJMP */
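
/* Added commentary: the _ZX_ fetch variants rely on assigning an unsigned
 * temporary of the source width to a wider unsigned destination, which zero
 * extends; the _SX_ variants cast the temporary to the signed type of the
 * source width first, so the widening assignment sign-extends instead:
 *
 *     uint8_t  u8Tmp = 0x80;
 *     uint32_t u32Zx = u8Tmp;            // 0x00000080 (zero extension)
 *     uint32_t u32Sx = (int8_t)u8Tmp;    // 0xffffff80 (sign extension)
 */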
11033
11034#ifndef IEM_WITH_SETJMP
11035# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11036 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11037# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11038 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11039# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11040 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11041# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11042 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11043#else
11044# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11045 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11046# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11047 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11048# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11049 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11050# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11051 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11052#endif
11053
11054#ifndef IEM_WITH_SETJMP
11055# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11056 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11057# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11058 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11059# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11060 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11061# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11062 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11063#else
11064# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11065 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11066# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11067 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11068# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11069 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11070# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11071 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11072#endif
11073
11074#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11075#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11076#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11077#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11078#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11079#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11080#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11081 do { \
11082 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11083 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11084 } while (0)
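
/* Added commentary: the NEG_QNAN stores above write the "real indefinite"
 * QNaN encodings: 0xffc00000 is the single precision value with sign=1, an
 * all-ones exponent and the top fraction bit set; 0xfff8000000000000 is the
 * double precision equivalent; for the 80-bit format the sign+exponent word
 * is 0xffff and the significand 0xc000000000000000 (integer bit + QNaN bit). */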
11085
11086#ifndef IEM_WITH_SETJMP
11087# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11088 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11089# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11090 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11091#else
11092# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11093 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11094# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11095 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11096#endif
11097
11098
11099#define IEM_MC_PUSH_U16(a_u16Value) \
11100 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11101#define IEM_MC_PUSH_U32(a_u32Value) \
11102 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11103#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11104 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11105#define IEM_MC_PUSH_U64(a_u64Value) \
11106 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11107
11108#define IEM_MC_POP_U16(a_pu16Value) \
11109 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11110#define IEM_MC_POP_U32(a_pu32Value) \
11111 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11112#define IEM_MC_POP_U64(a_pu64Value) \
11113 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11114
11115/** Maps guest memory for direct or bounce buffered access.
11116 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11117 * @remarks May return.
11118 */
11119#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11120 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11121
11122/** Maps guest memory for direct or bounce buffered access.
11123 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11124 * @remarks May return.
11125 */
11126#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11127 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11128
11129/** Commits the memory and unmaps the guest memory.
11130 * @remarks May return.
11131 */
11132#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11133 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11134
11135/** Commits the memory and unmaps the guest memory unless the FPU status word
11136 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11137 * would cause FLD not to store.
11138 *
11139 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11140 * store, while \#P will not.
11141 *
11142 * @remarks May in theory return - for now.
11143 */
11144#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11145 do { \
11146 if ( !(a_u16FSW & X86_FSW_ES) \
11147 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11148 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11149 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11150 } while (0)
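
/* Worked example (illustrative): the FSW exception flags IE/OE/UE line up with
 * the IM/OM/UM mask bits in the FCW, so the expression above isolates the
 * unmasked pending invalid/overflow/underflow exceptions.  With FSW=0x00a1
 * (ES, PE and IE set) and FCW=0x037e (everything masked except #I), the IE bit
 * survives the masking and the commit is skipped; with the power-on FCW of
 * 0x037f (everything masked) the same FSW lets the store go ahead. */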
11151
11152/** Calculate efficient address from R/M. */
11153#ifndef IEM_WITH_SETJMP
11154# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11155 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11156#else
11157# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11158 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11159#endif
11160
11161#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11162#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11163#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11164#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11165#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11166#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11167#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11168
11169/**
11170 * Defers the rest of the instruction emulation to a C implementation routine
11171 * and returns, only taking the standard parameters.
11172 *
11173 * @param a_pfnCImpl The pointer to the C routine.
11174 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11175 */
11176#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11177
11178/**
11179 * Defers the rest of instruction emulation to a C implementation routine and
11180 * returns, taking one argument in addition to the standard ones.
11181 *
11182 * @param a_pfnCImpl The pointer to the C routine.
11183 * @param a0 The argument.
11184 */
11185#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11186
11187/**
11188 * Defers the rest of the instruction emulation to a C implementation routine
11189 * and returns, taking two arguments in addition to the standard ones.
11190 *
11191 * @param a_pfnCImpl The pointer to the C routine.
11192 * @param a0 The first extra argument.
11193 * @param a1 The second extra argument.
11194 */
11195#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11196
11197/**
11198 * Defers the rest of the instruction emulation to a C implementation routine
11199 * and returns, taking three arguments in addition to the standard ones.
11200 *
11201 * @param a_pfnCImpl The pointer to the C routine.
11202 * @param a0 The first extra argument.
11203 * @param a1 The second extra argument.
11204 * @param a2 The third extra argument.
11205 */
11206#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11207
11208/**
11209 * Defers the rest of the instruction emulation to a C implementation routine
11210 * and returns, taking four arguments in addition to the standard ones.
11211 *
11212 * @param a_pfnCImpl The pointer to the C routine.
11213 * @param a0 The first extra argument.
11214 * @param a1 The second extra argument.
11215 * @param a2 The third extra argument.
11216 * @param a3 The fourth extra argument.
11217 */
11218#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11219
11220/**
11221 * Defers the rest of the instruction emulation to a C implementation routine
11222 * and returns, taking five arguments in addition to the standard ones.
11223 *
11224 * @param a_pfnCImpl The pointer to the C routine.
11225 * @param a0 The first extra argument.
11226 * @param a1 The second extra argument.
11227 * @param a2 The third extra argument.
11228 * @param a3 The fourth extra argument.
11229 * @param a4 The fifth extra argument.
11230 */
11231#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11232
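/* Hedged usage sketch (hypothetical handler and worker names, not taken from
 * this file): a decoder function that has finished fetching its operands
 * typically hands the heavy lifting to a C implementation routine like this:
 *
 *     FNIEMOP_DEF(iemOp_example_hypothetical)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         IEM_MC_BEGIN(1, 0);
 *         IEM_MC_ARG(uint16_t, u16Value, 0);
 *         IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *         IEM_MC_CALL_CIMPL_1(iemCImpl_SomeHypotheticalWorker, u16Value);
 *         IEM_MC_END();
 *     }
 *
 * Note that the IEM_MC_CALL_CIMPL_* macros contain a return statement, so
 * nothing after them in the IEM_MC block executes. */
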
11233/**
11234 * Defers the entire instruction emulation to a C implementation routine and
11235 * returns, only taking the standard parameters.
11236 *
11237 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11238 *
11239 * @param a_pfnCImpl The pointer to the C routine.
11240 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11241 */
11242#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11243
11244/**
11245 * Defers the entire instruction emulation to a C implementation routine and
11246 * returns, taking one argument in addition to the standard ones.
11247 *
11248 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11249 *
11250 * @param a_pfnCImpl The pointer to the C routine.
11251 * @param a0 The argument.
11252 */
11253#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11254
11255/**
11256 * Defers the entire instruction emulation to a C implementation routine and
11257 * returns, taking two arguments in addition to the standard ones.
11258 *
11259 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11260 *
11261 * @param a_pfnCImpl The pointer to the C routine.
11262 * @param a0 The first extra argument.
11263 * @param a1 The second extra argument.
11264 */
11265#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11266
11267/**
11268 * Defers the entire instruction emulation to a C implementation routine and
11269 * returns, taking three arguments in addition to the standard ones.
11270 *
11271 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11272 *
11273 * @param a_pfnCImpl The pointer to the C routine.
11274 * @param a0 The first extra argument.
11275 * @param a1 The second extra argument.
11276 * @param a2 The third extra argument.
11277 */
11278#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
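
/* Hedged usage sketch (hypothetical worker name): the DEFER variants are used
 * bare, as the whole body of a decoder function, e.g.
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeHypotheticalWorker);
 * which simply calls the C implementation with the decoded instruction
 * length and returns its status. */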
11279
11280/**
11281 * Calls a FPU assembly implementation taking one visible argument.
11282 *
11283 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11284 * @param a0 The first extra argument.
11285 */
11286#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11287 do { \
11288 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11289 } while (0)
11290
11291/**
11292 * Calls a FPU assembly implementation taking two visible arguments.
11293 *
11294 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11295 * @param a0 The first extra argument.
11296 * @param a1 The second extra argument.
11297 */
11298#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11299 do { \
11300 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11301 } while (0)
11302
11303/**
11304 * Calls a FPU assembly implementation taking three visible arguments.
11305 *
11306 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11307 * @param a0 The first extra argument.
11308 * @param a1 The second extra argument.
11309 * @param a2 The third extra argument.
11310 */
11311#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11312 do { \
11313 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11314 } while (0)
11315
11316#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11317 do { \
11318 (a_FpuData).FSW = (a_FSW); \
11319 (a_FpuData).r80Result = *(a_pr80Value); \
11320 } while (0)
11321
11322/** Pushes FPU result onto the stack. */
11323#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11324 iemFpuPushResult(pVCpu, &a_FpuData)
11325/** Pushes FPU result onto the stack and sets the FPUDP. */
11326#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11327 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11328
11329/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11330#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11331 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11332
11333/** Stores FPU result in a stack register. */
11334#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11335 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11336/** Stores FPU result in a stack register and pops the stack. */
11337#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11338 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11339/** Stores FPU result in a stack register and sets the FPUDP. */
11340#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11341 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11342/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11343 * stack. */
11344#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11345 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
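
/* Hedged usage sketch (names are placeholders, not lifted from this file):
 * a two-operand register FPU instruction pairs IEM_MC_CALL_FPU_AIMPL_3 with
 * the store/underflow macros, roughly:
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *         IEM_MC_CALL_FPU_AIMPL_3(pfnSomeAImpl, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 *
 * where FpuRes stands for an IEM_MC_LOCAL(IEMFPURESULT, ...) and pFpuRes for
 * a reference argument to it. */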
11346
11347/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11348#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11349 iemFpuUpdateOpcodeAndIp(pVCpu)
11350/** Free a stack register (for FFREE and FFREEP). */
11351#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11352 iemFpuStackFree(pVCpu, a_iStReg)
11353/** Increment the FPU stack pointer. */
11354#define IEM_MC_FPU_STACK_INC_TOP() \
11355 iemFpuStackIncTop(pVCpu)
11356/** Decrement the FPU stack pointer. */
11357#define IEM_MC_FPU_STACK_DEC_TOP() \
11358 iemFpuStackDecTop(pVCpu)
11359
11360/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11361#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11362 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11363/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11364#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11365 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11366/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11367#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11368 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11369/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11370#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11371 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11372/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11373 * stack. */
11374#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11375 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11376/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11377#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11378 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11379
11380/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11381#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11382 iemFpuStackUnderflow(pVCpu, a_iStDst)
11383/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11384 * stack. */
11385#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11386 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11387/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11388 * FPUDS. */
11389#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11390 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11391/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11392 * FPUDS. Pops stack. */
11393#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11394 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11395/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11396 * stack twice. */
11397#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11398 iemFpuStackUnderflowThenPopPop(pVCpu)
11399/** Raises a FPU stack underflow exception for an instruction pushing a result
11400 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11401#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11402 iemFpuStackPushUnderflow(pVCpu)
11403/** Raises a FPU stack underflow exception for an instruction pushing a result
11404 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11405#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11406 iemFpuStackPushUnderflowTwo(pVCpu)
11407
11408/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11409 * FPUIP, FPUCS and FOP. */
11410#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11411 iemFpuStackPushOverflow(pVCpu)
11412/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11413 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11414#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11415 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11416/** Prepares for using the FPU state.
11417 * Ensures that we can use the host FPU in the current context (RC+R0).
11418 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11419#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11420/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11421#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11422/** Actualizes the guest FPU state so it can be accessed and modified. */
11423#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11424
11425/** Prepares for using the SSE state.
11426 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11427 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11428#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11429/** Actualizes the guest XMM0..15 register state for read-only access. */
11430#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11431/** Actualizes the guest XMM0..15 register state for read-write access. */
11432#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11433
11434/**
11435 * Calls a MMX assembly implementation taking two visible arguments.
11436 *
11437 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11438 * @param a0 The first extra argument.
11439 * @param a1 The second extra argument.
11440 */
11441#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11442 do { \
11443 IEM_MC_PREPARE_FPU_USAGE(); \
11444 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11445 } while (0)
11446
11447/**
11448 * Calls a MMX assembly implementation taking three visible arguments.
11449 *
11450 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11451 * @param a0 The first extra argument.
11452 * @param a1 The second extra argument.
11453 * @param a2 The third extra argument.
11454 */
11455#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11456 do { \
11457 IEM_MC_PREPARE_FPU_USAGE(); \
11458 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11459 } while (0)
11460
11461
11462/**
11463 * Calls a SSE assembly implementation taking two visible arguments.
11464 *
11465 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11466 * @param a0 The first extra argument.
11467 * @param a1 The second extra argument.
11468 */
11469#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11470 do { \
11471 IEM_MC_PREPARE_SSE_USAGE(); \
11472 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11473 } while (0)
11474
11475/**
11476 * Calls a SSE assembly implementation taking three visible arguments.
11477 *
11478 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11479 * @param a0 The first extra argument.
11480 * @param a1 The second extra argument.
11481 * @param a2 The third extra argument.
11482 */
11483#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11484 do { \
11485 IEM_MC_PREPARE_SSE_USAGE(); \
11486 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11487 } while (0)
11488
11489/** @note Not for IOPL or IF testing. */
11490#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11491/** @note Not for IOPL or IF testing. */
11492#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11493/** @note Not for IOPL or IF testing. */
11494#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11495/** @note Not for IOPL or IF testing. */
11496#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11497/** @note Not for IOPL or IF testing. */
11498#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11499 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11500 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11501/** @note Not for IOPL or IF testing. */
11502#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11503 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11504 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11505/** @note Not for IOPL or IF testing. */
11506#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11507 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11508 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11509 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11510/** @note Not for IOPL or IF testing. */
11511#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11512 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11513 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11514 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11515#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11516#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11517#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11518/** @note Not for IOPL or IF testing. */
11519#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11520 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11521 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11522/** @note Not for IOPL or IF testing. */
11523#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11524 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11525 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11526/** @note Not for IOPL or IF testing. */
11527#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11528 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11529 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11530/** @note Not for IOPL or IF testing. */
11531#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11532 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11533 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11534/** @note Not for IOPL or IF testing. */
11535#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11536 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11537 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11538/** @note Not for IOPL or IF testing. */
11539#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11540 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11541 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11542#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11543#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11544
11545#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11546 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11547#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11548 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11549#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11550 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11551#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11552 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11553#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11554 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11555#define IEM_MC_IF_FCW_IM() \
11556 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11557
11558#define IEM_MC_ELSE() } else {
11559#define IEM_MC_ENDIF() } do {} while (0)
11560
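/* Hedged usage sketch (hypothetical conditional jump handler): the IF/ELSE/
 * ENDIF macros open and close plain C blocks, so a Jcc-style body reads:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);      // i8Imm fetched earlier, placeholder here
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */
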
11561/** @} */
11562
11563
11564/** @name Opcode Debug Helpers.
11565 * @{
11566 */
11567#ifdef VBOX_WITH_STATISTICS
11568# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11569#else
11570# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11571#endif
11572
11573#ifdef DEBUG
11574# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11575 do { \
11576 IEMOP_INC_STATS(a_Stats); \
11577 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11578 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11579 } while (0)
11580
11581# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11582 do { \
11583 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11584 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11585 (void)RT_CONCAT(OP_,a_Upper); \
11586 (void)(a_fDisHints); \
11587 (void)(a_fIemHints); \
11588 } while (0)
11589
11590# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11591 do { \
11592 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11593 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11594 (void)RT_CONCAT(OP_,a_Upper); \
11595 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11596 (void)(a_fDisHints); \
11597 (void)(a_fIemHints); \
11598 } while (0)
11599
11600# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11601 do { \
11602 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11603 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11604 (void)RT_CONCAT(OP_,a_Upper); \
11605 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11606 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11607 (void)(a_fDisHints); \
11608 (void)(a_fIemHints); \
11609 } while (0)
11610
11611# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11612 do { \
11613 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11614 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11615 (void)RT_CONCAT(OP_,a_Upper); \
11616 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11617 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11618 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11619 (void)(a_fDisHints); \
11620 (void)(a_fIemHints); \
11621 } while (0)
11622
11623# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11624 do { \
11625 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11626 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11627 (void)RT_CONCAT(OP_,a_Upper); \
11628 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11629 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11630 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11631 (void)RT_CONCAT(OP_PARM_,a_Op4); \
11632 (void)(a_fDisHints); \
11633 (void)(a_fIemHints); \
11634 } while (0)
11635
11636#else
11637# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11638
11639# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11640 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11641# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11642 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11643# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11644 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11645# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11646 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11647# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11648 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11649
11650#endif
11651
11652#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11653 IEMOP_MNEMONIC0EX(a_Lower, \
11654 #a_Lower, \
11655 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
11656#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11657 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
11658 #a_Lower " " #a_Op1, \
11659 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
11660#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11661 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
11662 #a_Lower " " #a_Op1 "," #a_Op2, \
11663 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
11664#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11665 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
11666 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
11667 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
11668#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11669 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
11670 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
11671 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
11672
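/* Hedged usage sketch (illustrative form/operand tokens): a two-operand
 * instruction would typically announce itself as
 *     IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 * which bumps the add_Eb_Gb statistics counter, logs "add Eb,Gb" at level 4 in
 * debug builds, and references the IEMOPFORM_/OP_/OP_PARM_ names so that typos
 * fail to compile. */
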
11673/** @} */
11674
11675
11676/** @name Opcode Helpers.
11677 * @{
11678 */
11679
11680#ifdef IN_RING3
11681# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11682 do { \
11683 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11684 else \
11685 { \
11686 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11687 return IEMOP_RAISE_INVALID_OPCODE(); \
11688 } \
11689 } while (0)
11690#else
11691# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11692 do { \
11693 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11694 else return IEMOP_RAISE_INVALID_OPCODE(); \
11695 } while (0)
11696#endif
11697
11698/** The instruction requires a 186 or later. */
11699#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11700# define IEMOP_HLP_MIN_186() do { } while (0)
11701#else
11702# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11703#endif
11704
11705/** The instruction requires a 286 or later. */
11706#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11707# define IEMOP_HLP_MIN_286() do { } while (0)
11708#else
11709# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11710#endif
11711
11712/** The instruction requires a 386 or later. */
11713#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11714# define IEMOP_HLP_MIN_386() do { } while (0)
11715#else
11716# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11717#endif
11718
11719/** The instruction requires a 386 or later if the given expression is true. */
11720#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11721# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11722#else
11723# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11724#endif
11725
11726/** The instruction requires a 486 or later. */
11727#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11728# define IEMOP_HLP_MIN_486() do { } while (0)
11729#else
11730# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11731#endif
11732
11733/** The instruction requires a Pentium (586) or later. */
11734#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11735# define IEMOP_HLP_MIN_586() do { } while (0)
11736#else
11737# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11738#endif
11739
11740/** The instruction requires a PentiumPro (686) or later. */
11741#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11742# define IEMOP_HLP_MIN_686() do { } while (0)
11743#else
11744# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11745#endif
11746
11747
11748/** The instruction raises an \#UD in real and V8086 mode. */
11749#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11750 do \
11751 { \
11752 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11753 return IEMOP_RAISE_INVALID_OPCODE(); \
11754 } while (0)
11755
11756#if 0
11757#ifdef VBOX_WITH_NESTED_HWVIRT
11758/** The instruction raises an \#UD when SVM is not enabled. */
11759#define IEMOP_HLP_NEEDS_SVM_ENABLED() \
11760 do \
11761 { \
11762      if (!IEM_IS_SVM_ENABLED(pVCpu)) \
11763 return IEMOP_RAISE_INVALID_OPCODE(); \
11764 } while (0)
11765#endif
11766#endif
11767
11768/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11769 * 64-bit mode. */
11770#define IEMOP_HLP_NO_64BIT() \
11771 do \
11772 { \
11773 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11774 return IEMOP_RAISE_INVALID_OPCODE(); \
11775 } while (0)
11776
11777/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11778 * 64-bit mode. */
11779#define IEMOP_HLP_ONLY_64BIT() \
11780 do \
11781 { \
11782 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11783 return IEMOP_RAISE_INVALID_OPCODE(); \
11784 } while (0)
11785
11786/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11787#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11788 do \
11789 { \
11790 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11791 iemRecalEffOpSize64Default(pVCpu); \
11792 } while (0)
11793
11794/** The instruction has 64-bit operand size if 64-bit mode. */
11795#define IEMOP_HLP_64BIT_OP_SIZE() \
11796 do \
11797 { \
11798 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11799 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11800 } while (0)
11801
11802/** Only a REX prefix immediately preceding the first opcode byte takes
11803 * effect. This macro helps ensure this as well as log bad guest code. */
11804#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11805 do \
11806 { \
11807 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11808 { \
11809 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11810 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11811 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11812 pVCpu->iem.s.uRexB = 0; \
11813 pVCpu->iem.s.uRexIndex = 0; \
11814 pVCpu->iem.s.uRexReg = 0; \
11815 iemRecalEffOpSize(pVCpu); \
11816 } \
11817 } while (0)
11818
11819/**
11820 * Done decoding.
11821 */
11822#define IEMOP_HLP_DONE_DECODING() \
11823 do \
11824 { \
11825 /*nothing for now, maybe later... */ \
11826 } while (0)
11827
11828/**
11829 * Done decoding, raise \#UD exception if lock prefix present.
11830 */
11831#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11832 do \
11833 { \
11834 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11835 { /* likely */ } \
11836 else \
11837 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11838 } while (0)
11839#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11840 do \
11841 { \
11842 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11843 { /* likely */ } \
11844 else \
11845 { \
11846 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11847 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11848 } \
11849 } while (0)
11850#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11851 do \
11852 { \
11853 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11854 { /* likely */ } \
11855 else \
11856 { \
11857 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11858 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11859 } \
11860 } while (0)
11861
11862/**
11863 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11864 * are present.
11865 */
11866#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11867 do \
11868 { \
11869 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11870 { /* likely */ } \
11871 else \
11872 return IEMOP_RAISE_INVALID_OPCODE(); \
11873 } while (0)
11874
11875
11876/**
11877 * Calculates the effective address of a ModR/M memory operand.
11878 *
11879 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11880 *
11881 * @return Strict VBox status code.
11882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11883 * @param bRm The ModRM byte.
11884 * @param cbImm The size of any immediate following the
11885 * effective address opcode bytes. Important for
11886 * RIP relative addressing.
11887 * @param pGCPtrEff Where to return the effective address.
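 *
 * @remarks Worked example (illustrative): with 16-bit addressing, bRm=0x46
 *          decodes as mod=01, reg=000, rm=110, i.e. [bp+disp8]; for bp=0x1000
 *          and disp8=0x10 the function returns 0x1010 and makes SS the default
 *          segment via SET_SS_DEF().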
11888 */
11889IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11890{
11891 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11892 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11893# define SET_SS_DEF() \
11894 do \
11895 { \
11896 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11897 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11898 } while (0)
11899
11900 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11901 {
11902/** @todo Check the effective address size crap! */
11903 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11904 {
11905 uint16_t u16EffAddr;
11906
11907 /* Handle the disp16 form with no registers first. */
11908 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11909 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11910 else
11911 {
11912                /* Get the displacement. */
11913 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11914 {
11915 case 0: u16EffAddr = 0; break;
11916 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11917 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11918 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11919 }
11920
11921 /* Add the base and index registers to the disp. */
11922 switch (bRm & X86_MODRM_RM_MASK)
11923 {
11924 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11925 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11926 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11927 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11928 case 4: u16EffAddr += pCtx->si; break;
11929 case 5: u16EffAddr += pCtx->di; break;
11930 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11931 case 7: u16EffAddr += pCtx->bx; break;
11932 }
11933 }
11934
11935 *pGCPtrEff = u16EffAddr;
11936 }
11937 else
11938 {
11939 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11940 uint32_t u32EffAddr;
11941
11942 /* Handle the disp32 form with no registers first. */
11943 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11944 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11945 else
11946 {
11947 /* Get the register (or SIB) value. */
11948 switch ((bRm & X86_MODRM_RM_MASK))
11949 {
11950 case 0: u32EffAddr = pCtx->eax; break;
11951 case 1: u32EffAddr = pCtx->ecx; break;
11952 case 2: u32EffAddr = pCtx->edx; break;
11953 case 3: u32EffAddr = pCtx->ebx; break;
11954 case 4: /* SIB */
11955 {
11956 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11957
11958 /* Get the index and scale it. */
11959 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11960 {
11961 case 0: u32EffAddr = pCtx->eax; break;
11962 case 1: u32EffAddr = pCtx->ecx; break;
11963 case 2: u32EffAddr = pCtx->edx; break;
11964 case 3: u32EffAddr = pCtx->ebx; break;
11965                        case 4: u32EffAddr = 0; /* none */ break;
11966 case 5: u32EffAddr = pCtx->ebp; break;
11967 case 6: u32EffAddr = pCtx->esi; break;
11968 case 7: u32EffAddr = pCtx->edi; break;
11969 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11970 }
11971 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11972
11973 /* add base */
11974 switch (bSib & X86_SIB_BASE_MASK)
11975 {
11976 case 0: u32EffAddr += pCtx->eax; break;
11977 case 1: u32EffAddr += pCtx->ecx; break;
11978 case 2: u32EffAddr += pCtx->edx; break;
11979 case 3: u32EffAddr += pCtx->ebx; break;
11980 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11981 case 5:
11982 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11983 {
11984 u32EffAddr += pCtx->ebp;
11985 SET_SS_DEF();
11986 }
11987 else
11988 {
11989 uint32_t u32Disp;
11990 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11991 u32EffAddr += u32Disp;
11992 }
11993 break;
11994 case 6: u32EffAddr += pCtx->esi; break;
11995 case 7: u32EffAddr += pCtx->edi; break;
11996 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11997 }
11998 break;
11999 }
12000 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12001 case 6: u32EffAddr = pCtx->esi; break;
12002 case 7: u32EffAddr = pCtx->edi; break;
12003 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12004 }
12005
12006 /* Get and add the displacement. */
12007 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12008 {
12009 case 0:
12010 break;
12011 case 1:
12012 {
12013 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12014 u32EffAddr += i8Disp;
12015 break;
12016 }
12017 case 2:
12018 {
12019 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12020 u32EffAddr += u32Disp;
12021 break;
12022 }
12023 default:
12024 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12025 }
12026
12027 }
12028 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12029 *pGCPtrEff = u32EffAddr;
12030 else
12031 {
12032 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12033 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12034 }
12035 }
12036 }
12037 else
12038 {
12039 uint64_t u64EffAddr;
12040
12041 /* Handle the rip+disp32 form with no registers first. */
12042 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12043 {
12044 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12045 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12046 }
12047 else
12048 {
12049 /* Get the register (or SIB) value. */
12050 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12051 {
12052 case 0: u64EffAddr = pCtx->rax; break;
12053 case 1: u64EffAddr = pCtx->rcx; break;
12054 case 2: u64EffAddr = pCtx->rdx; break;
12055 case 3: u64EffAddr = pCtx->rbx; break;
12056 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12057 case 6: u64EffAddr = pCtx->rsi; break;
12058 case 7: u64EffAddr = pCtx->rdi; break;
12059 case 8: u64EffAddr = pCtx->r8; break;
12060 case 9: u64EffAddr = pCtx->r9; break;
12061 case 10: u64EffAddr = pCtx->r10; break;
12062 case 11: u64EffAddr = pCtx->r11; break;
12063 case 13: u64EffAddr = pCtx->r13; break;
12064 case 14: u64EffAddr = pCtx->r14; break;
12065 case 15: u64EffAddr = pCtx->r15; break;
12066 /* SIB */
12067 case 4:
12068 case 12:
12069 {
12070 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12071
12072 /* Get the index and scale it. */
12073 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12074 {
12075 case 0: u64EffAddr = pCtx->rax; break;
12076 case 1: u64EffAddr = pCtx->rcx; break;
12077 case 2: u64EffAddr = pCtx->rdx; break;
12078 case 3: u64EffAddr = pCtx->rbx; break;
12079 case 4: u64EffAddr = 0; /*none */ break;
12080 case 5: u64EffAddr = pCtx->rbp; break;
12081 case 6: u64EffAddr = pCtx->rsi; break;
12082 case 7: u64EffAddr = pCtx->rdi; break;
12083 case 8: u64EffAddr = pCtx->r8; break;
12084 case 9: u64EffAddr = pCtx->r9; break;
12085 case 10: u64EffAddr = pCtx->r10; break;
12086 case 11: u64EffAddr = pCtx->r11; break;
12087 case 12: u64EffAddr = pCtx->r12; break;
12088 case 13: u64EffAddr = pCtx->r13; break;
12089 case 14: u64EffAddr = pCtx->r14; break;
12090 case 15: u64EffAddr = pCtx->r15; break;
12091 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12092 }
12093 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12094
12095 /* add base */
12096 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12097 {
12098 case 0: u64EffAddr += pCtx->rax; break;
12099 case 1: u64EffAddr += pCtx->rcx; break;
12100 case 2: u64EffAddr += pCtx->rdx; break;
12101 case 3: u64EffAddr += pCtx->rbx; break;
12102 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12103 case 6: u64EffAddr += pCtx->rsi; break;
12104 case 7: u64EffAddr += pCtx->rdi; break;
12105 case 8: u64EffAddr += pCtx->r8; break;
12106 case 9: u64EffAddr += pCtx->r9; break;
12107 case 10: u64EffAddr += pCtx->r10; break;
12108 case 11: u64EffAddr += pCtx->r11; break;
12109 case 12: u64EffAddr += pCtx->r12; break;
12110 case 14: u64EffAddr += pCtx->r14; break;
12111 case 15: u64EffAddr += pCtx->r15; break;
12112 /* complicated encodings */
12113 case 5:
12114 case 13:
12115 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12116 {
12117 if (!pVCpu->iem.s.uRexB)
12118 {
12119 u64EffAddr += pCtx->rbp;
12120 SET_SS_DEF();
12121 }
12122 else
12123 u64EffAddr += pCtx->r13;
12124 }
12125 else
12126 {
12127 uint32_t u32Disp;
12128 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12129 u64EffAddr += (int32_t)u32Disp;
12130 }
12131 break;
12132 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12133 }
12134 break;
12135 }
12136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12137 }
12138
12139 /* Get and add the displacement. */
12140 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12141 {
12142 case 0:
12143 break;
12144 case 1:
12145 {
12146 int8_t i8Disp;
12147 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12148 u64EffAddr += i8Disp;
12149 break;
12150 }
12151 case 2:
12152 {
12153 uint32_t u32Disp;
12154 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12155 u64EffAddr += (int32_t)u32Disp;
12156 break;
12157 }
12158 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12159 }
12160
12161 }
12162
12163 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12164 *pGCPtrEff = u64EffAddr;
12165 else
12166 {
12167 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12168 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12169 }
12170 }
12171
12172 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12173 return VINF_SUCCESS;
12174}
12175
12176
12177/**
12178 * Calculates the effective address of a ModR/M memory operand.
12179 *
12180 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12181 *
12182 * @return Strict VBox status code.
12183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12184 * @param bRm The ModRM byte.
12185 * @param cbImm The size of any immediate following the
12186 * effective address opcode bytes. Important for
12187 * RIP relative addressing.
12188 * @param pGCPtrEff Where to return the effective address.
12189 * @param offRsp RSP displacement.
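 *
 * @remarks Identical to iemOpHlpCalcRmEffAddr except that offRsp is added
 *          whenever ESP/RSP is selected as the SIB base register (see the
 *          base "case 4" handling below); 16-bit addressing is unaffected.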
12190 */
12191IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12192{
12193 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12194 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12195# define SET_SS_DEF() \
12196 do \
12197 { \
12198 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12199 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12200 } while (0)
12201
12202 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12203 {
12204/** @todo Check the effective address size crap! */
12205 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12206 {
12207 uint16_t u16EffAddr;
12208
12209 /* Handle the disp16 form with no registers first. */
12210 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12211 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12212 else
12213 {
12214 /* Get the displacement. */
12215 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12216 {
12217 case 0: u16EffAddr = 0; break;
12218 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12219 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12220 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12221 }
12222
12223 /* Add the base and index registers to the disp. */
12224 switch (bRm & X86_MODRM_RM_MASK)
12225 {
12226 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12227 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12228 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12229 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12230 case 4: u16EffAddr += pCtx->si; break;
12231 case 5: u16EffAddr += pCtx->di; break;
12232 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12233 case 7: u16EffAddr += pCtx->bx; break;
12234 }
12235 }
12236
12237 *pGCPtrEff = u16EffAddr;
12238 }
12239 else
12240 {
12241 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12242 uint32_t u32EffAddr;
12243
12244 /* Handle the disp32 form with no registers first. */
12245 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12246 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12247 else
12248 {
12249 /* Get the register (or SIB) value. */
12250 switch ((bRm & X86_MODRM_RM_MASK))
12251 {
12252 case 0: u32EffAddr = pCtx->eax; break;
12253 case 1: u32EffAddr = pCtx->ecx; break;
12254 case 2: u32EffAddr = pCtx->edx; break;
12255 case 3: u32EffAddr = pCtx->ebx; break;
12256 case 4: /* SIB */
12257 {
12258 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12259
12260 /* Get the index and scale it. */
12261 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12262 {
12263 case 0: u32EffAddr = pCtx->eax; break;
12264 case 1: u32EffAddr = pCtx->ecx; break;
12265 case 2: u32EffAddr = pCtx->edx; break;
12266 case 3: u32EffAddr = pCtx->ebx; break;
12267 case 4: u32EffAddr = 0; /*none */ break;
12268 case 5: u32EffAddr = pCtx->ebp; break;
12269 case 6: u32EffAddr = pCtx->esi; break;
12270 case 7: u32EffAddr = pCtx->edi; break;
12271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12272 }
12273 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12274
12275 /* add base */
12276 switch (bSib & X86_SIB_BASE_MASK)
12277 {
12278 case 0: u32EffAddr += pCtx->eax; break;
12279 case 1: u32EffAddr += pCtx->ecx; break;
12280 case 2: u32EffAddr += pCtx->edx; break;
12281 case 3: u32EffAddr += pCtx->ebx; break;
12282 case 4:
12283 u32EffAddr += pCtx->esp + offRsp;
12284 SET_SS_DEF();
12285 break;
12286 case 5:
12287 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12288 {
12289 u32EffAddr += pCtx->ebp;
12290 SET_SS_DEF();
12291 }
12292 else
12293 {
12294 uint32_t u32Disp;
12295 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12296 u32EffAddr += u32Disp;
12297 }
12298 break;
12299 case 6: u32EffAddr += pCtx->esi; break;
12300 case 7: u32EffAddr += pCtx->edi; break;
12301 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12302 }
12303 break;
12304 }
12305 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12306 case 6: u32EffAddr = pCtx->esi; break;
12307 case 7: u32EffAddr = pCtx->edi; break;
12308 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12309 }
12310
12311 /* Get and add the displacement. */
12312 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12313 {
12314 case 0:
12315 break;
12316 case 1:
12317 {
12318 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12319 u32EffAddr += i8Disp;
12320 break;
12321 }
12322 case 2:
12323 {
12324 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12325 u32EffAddr += u32Disp;
12326 break;
12327 }
12328 default:
12329 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12330 }
12331
12332 }
12333 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12334 *pGCPtrEff = u32EffAddr;
12335 else
12336 {
12337 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12338 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12339 }
12340 }
12341 }
12342 else
12343 {
12344 uint64_t u64EffAddr;
12345
12346 /* Handle the rip+disp32 form with no registers first. */
12347 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12348 {
12349 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12350 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12351 }
12352 else
12353 {
12354 /* Get the register (or SIB) value. */
12355 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12356 {
12357 case 0: u64EffAddr = pCtx->rax; break;
12358 case 1: u64EffAddr = pCtx->rcx; break;
12359 case 2: u64EffAddr = pCtx->rdx; break;
12360 case 3: u64EffAddr = pCtx->rbx; break;
12361 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12362 case 6: u64EffAddr = pCtx->rsi; break;
12363 case 7: u64EffAddr = pCtx->rdi; break;
12364 case 8: u64EffAddr = pCtx->r8; break;
12365 case 9: u64EffAddr = pCtx->r9; break;
12366 case 10: u64EffAddr = pCtx->r10; break;
12367 case 11: u64EffAddr = pCtx->r11; break;
12368 case 13: u64EffAddr = pCtx->r13; break;
12369 case 14: u64EffAddr = pCtx->r14; break;
12370 case 15: u64EffAddr = pCtx->r15; break;
12371 /* SIB */
12372 case 4:
12373 case 12:
12374 {
12375 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12376
12377 /* Get the index and scale it. */
12378 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12379 {
12380 case 0: u64EffAddr = pCtx->rax; break;
12381 case 1: u64EffAddr = pCtx->rcx; break;
12382 case 2: u64EffAddr = pCtx->rdx; break;
12383 case 3: u64EffAddr = pCtx->rbx; break;
12384 case 4: u64EffAddr = 0; /*none */ break;
12385 case 5: u64EffAddr = pCtx->rbp; break;
12386 case 6: u64EffAddr = pCtx->rsi; break;
12387 case 7: u64EffAddr = pCtx->rdi; break;
12388 case 8: u64EffAddr = pCtx->r8; break;
12389 case 9: u64EffAddr = pCtx->r9; break;
12390 case 10: u64EffAddr = pCtx->r10; break;
12391 case 11: u64EffAddr = pCtx->r11; break;
12392 case 12: u64EffAddr = pCtx->r12; break;
12393 case 13: u64EffAddr = pCtx->r13; break;
12394 case 14: u64EffAddr = pCtx->r14; break;
12395 case 15: u64EffAddr = pCtx->r15; break;
12396 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12397 }
12398 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12399
12400 /* add base */
12401 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12402 {
12403 case 0: u64EffAddr += pCtx->rax; break;
12404 case 1: u64EffAddr += pCtx->rcx; break;
12405 case 2: u64EffAddr += pCtx->rdx; break;
12406 case 3: u64EffAddr += pCtx->rbx; break;
12407 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12408 case 6: u64EffAddr += pCtx->rsi; break;
12409 case 7: u64EffAddr += pCtx->rdi; break;
12410 case 8: u64EffAddr += pCtx->r8; break;
12411 case 9: u64EffAddr += pCtx->r9; break;
12412 case 10: u64EffAddr += pCtx->r10; break;
12413 case 11: u64EffAddr += pCtx->r11; break;
12414 case 12: u64EffAddr += pCtx->r12; break;
12415 case 14: u64EffAddr += pCtx->r14; break;
12416 case 15: u64EffAddr += pCtx->r15; break;
12417 /* complicated encodings */
12418 case 5:
12419 case 13:
12420 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12421 {
12422 if (!pVCpu->iem.s.uRexB)
12423 {
12424 u64EffAddr += pCtx->rbp;
12425 SET_SS_DEF();
12426 }
12427 else
12428 u64EffAddr += pCtx->r13;
12429 }
12430 else
12431 {
12432 uint32_t u32Disp;
12433 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12434 u64EffAddr += (int32_t)u32Disp;
12435 }
12436 break;
12437 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12438 }
12439 break;
12440 }
12441 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12442 }
12443
12444 /* Get and add the displacement. */
12445 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12446 {
12447 case 0:
12448 break;
12449 case 1:
12450 {
12451 int8_t i8Disp;
12452 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12453 u64EffAddr += i8Disp;
12454 break;
12455 }
12456 case 2:
12457 {
12458 uint32_t u32Disp;
12459 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12460 u64EffAddr += (int32_t)u32Disp;
12461 break;
12462 }
12463 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12464 }
12465
12466 }
12467
12468 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12469 *pGCPtrEff = u64EffAddr;
12470 else
12471 {
12472 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12473 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12474 }
12475 }
12476
12477 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12478 return VINF_SUCCESS;
12479}
12480
12481
12482#ifdef IEM_WITH_SETJMP
12483/**
12484 * Calculates the effective address of a ModR/M memory operand.
12485 *
12486 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12487 *
12488 * May longjmp on internal error.
12489 *
12490 * @return The effective address.
12491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12492 * @param bRm The ModRM byte.
12493 * @param cbImm The size of any immediate following the
12494 * effective address opcode bytes. Important for
12495 * RIP relative addressing.
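 *
 * @remarks Same decoding as iemOpHlpCalcRmEffAddr, but the effective address
 *          is returned directly instead of via a strict status code; this
 *          variant is only compiled when IEM_WITH_SETJMP is defined.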
12496 */
12497IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12498{
12499 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12500 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12501# define SET_SS_DEF() \
12502 do \
12503 { \
12504 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12505 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12506 } while (0)
12507
12508 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12509 {
12510/** @todo Check the effective address size crap! */
12511 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12512 {
12513 uint16_t u16EffAddr;
12514
12515 /* Handle the disp16 form with no registers first. */
12516 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12517 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12518 else
12519 {
12520 /* Get the displacement. */
12521 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12522 {
12523 case 0: u16EffAddr = 0; break;
12524 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12525 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12526 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12527 }
12528
12529 /* Add the base and index registers to the disp. */
12530 switch (bRm & X86_MODRM_RM_MASK)
12531 {
12532 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12533 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12534 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12535 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12536 case 4: u16EffAddr += pCtx->si; break;
12537 case 5: u16EffAddr += pCtx->di; break;
12538 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12539 case 7: u16EffAddr += pCtx->bx; break;
12540 }
12541 }
12542
12543 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12544 return u16EffAddr;
12545 }
12546
12547 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12548 uint32_t u32EffAddr;
12549
12550 /* Handle the disp32 form with no registers first. */
12551 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12552 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12553 else
12554 {
12555 /* Get the register (or SIB) value. */
12556 switch ((bRm & X86_MODRM_RM_MASK))
12557 {
12558 case 0: u32EffAddr = pCtx->eax; break;
12559 case 1: u32EffAddr = pCtx->ecx; break;
12560 case 2: u32EffAddr = pCtx->edx; break;
12561 case 3: u32EffAddr = pCtx->ebx; break;
12562 case 4: /* SIB */
12563 {
12564 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12565
12566 /* Get the index and scale it. */
12567 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12568 {
12569 case 0: u32EffAddr = pCtx->eax; break;
12570 case 1: u32EffAddr = pCtx->ecx; break;
12571 case 2: u32EffAddr = pCtx->edx; break;
12572 case 3: u32EffAddr = pCtx->ebx; break;
12573 case 4: u32EffAddr = 0; /*none */ break;
12574 case 5: u32EffAddr = pCtx->ebp; break;
12575 case 6: u32EffAddr = pCtx->esi; break;
12576 case 7: u32EffAddr = pCtx->edi; break;
12577 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12578 }
12579 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12580
12581 /* add base */
12582 switch (bSib & X86_SIB_BASE_MASK)
12583 {
12584 case 0: u32EffAddr += pCtx->eax; break;
12585 case 1: u32EffAddr += pCtx->ecx; break;
12586 case 2: u32EffAddr += pCtx->edx; break;
12587 case 3: u32EffAddr += pCtx->ebx; break;
12588 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12589 case 5:
12590 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12591 {
12592 u32EffAddr += pCtx->ebp;
12593 SET_SS_DEF();
12594 }
12595 else
12596 {
12597 uint32_t u32Disp;
12598 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12599 u32EffAddr += u32Disp;
12600 }
12601 break;
12602 case 6: u32EffAddr += pCtx->esi; break;
12603 case 7: u32EffAddr += pCtx->edi; break;
12604 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12605 }
12606 break;
12607 }
12608 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12609 case 6: u32EffAddr = pCtx->esi; break;
12610 case 7: u32EffAddr = pCtx->edi; break;
12611 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12612 }
12613
12614 /* Get and add the displacement. */
12615 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12616 {
12617 case 0:
12618 break;
12619 case 1:
12620 {
12621 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12622 u32EffAddr += i8Disp;
12623 break;
12624 }
12625 case 2:
12626 {
12627 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12628 u32EffAddr += u32Disp;
12629 break;
12630 }
12631 default:
12632 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12633 }
12634 }
12635
12636 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12637 {
12638 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12639 return u32EffAddr;
12640 }
12641 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12642 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12643 return u32EffAddr & UINT16_MAX;
12644 }
12645
12646 uint64_t u64EffAddr;
12647
12648 /* Handle the rip+disp32 form with no registers first. */
12649 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12650 {
12651 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12652 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12653 }
12654 else
12655 {
12656 /* Get the register (or SIB) value. */
12657 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12658 {
12659 case 0: u64EffAddr = pCtx->rax; break;
12660 case 1: u64EffAddr = pCtx->rcx; break;
12661 case 2: u64EffAddr = pCtx->rdx; break;
12662 case 3: u64EffAddr = pCtx->rbx; break;
12663 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12664 case 6: u64EffAddr = pCtx->rsi; break;
12665 case 7: u64EffAddr = pCtx->rdi; break;
12666 case 8: u64EffAddr = pCtx->r8; break;
12667 case 9: u64EffAddr = pCtx->r9; break;
12668 case 10: u64EffAddr = pCtx->r10; break;
12669 case 11: u64EffAddr = pCtx->r11; break;
12670 case 13: u64EffAddr = pCtx->r13; break;
12671 case 14: u64EffAddr = pCtx->r14; break;
12672 case 15: u64EffAddr = pCtx->r15; break;
12673 /* SIB */
12674 case 4:
12675 case 12:
12676 {
12677 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12678
12679 /* Get the index and scale it. */
12680 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12681 {
12682 case 0: u64EffAddr = pCtx->rax; break;
12683 case 1: u64EffAddr = pCtx->rcx; break;
12684 case 2: u64EffAddr = pCtx->rdx; break;
12685 case 3: u64EffAddr = pCtx->rbx; break;
12686 case 4: u64EffAddr = 0; /*none */ break;
12687 case 5: u64EffAddr = pCtx->rbp; break;
12688 case 6: u64EffAddr = pCtx->rsi; break;
12689 case 7: u64EffAddr = pCtx->rdi; break;
12690 case 8: u64EffAddr = pCtx->r8; break;
12691 case 9: u64EffAddr = pCtx->r9; break;
12692 case 10: u64EffAddr = pCtx->r10; break;
12693 case 11: u64EffAddr = pCtx->r11; break;
12694 case 12: u64EffAddr = pCtx->r12; break;
12695 case 13: u64EffAddr = pCtx->r13; break;
12696 case 14: u64EffAddr = pCtx->r14; break;
12697 case 15: u64EffAddr = pCtx->r15; break;
12698 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12699 }
12700 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12701
12702 /* add base */
12703 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12704 {
12705 case 0: u64EffAddr += pCtx->rax; break;
12706 case 1: u64EffAddr += pCtx->rcx; break;
12707 case 2: u64EffAddr += pCtx->rdx; break;
12708 case 3: u64EffAddr += pCtx->rbx; break;
12709 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12710 case 6: u64EffAddr += pCtx->rsi; break;
12711 case 7: u64EffAddr += pCtx->rdi; break;
12712 case 8: u64EffAddr += pCtx->r8; break;
12713 case 9: u64EffAddr += pCtx->r9; break;
12714 case 10: u64EffAddr += pCtx->r10; break;
12715 case 11: u64EffAddr += pCtx->r11; break;
12716 case 12: u64EffAddr += pCtx->r12; break;
12717 case 14: u64EffAddr += pCtx->r14; break;
12718 case 15: u64EffAddr += pCtx->r15; break;
12719 /* complicated encodings */
12720 case 5:
12721 case 13:
12722 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12723 {
12724 if (!pVCpu->iem.s.uRexB)
12725 {
12726 u64EffAddr += pCtx->rbp;
12727 SET_SS_DEF();
12728 }
12729 else
12730 u64EffAddr += pCtx->r13;
12731 }
12732 else
12733 {
12734 uint32_t u32Disp;
12735 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12736 u64EffAddr += (int32_t)u32Disp;
12737 }
12738 break;
12739 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12740 }
12741 break;
12742 }
12743 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12744 }
12745
12746 /* Get and add the displacement. */
12747 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12748 {
12749 case 0:
12750 break;
12751 case 1:
12752 {
12753 int8_t i8Disp;
12754 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12755 u64EffAddr += i8Disp;
12756 break;
12757 }
12758 case 2:
12759 {
12760 uint32_t u32Disp;
12761 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12762 u64EffAddr += (int32_t)u32Disp;
12763 break;
12764 }
12765 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12766 }
12767
12768 }
12769
12770 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12771 {
12772 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12773 return u64EffAddr;
12774 }
12775 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12776 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12777 return u64EffAddr & UINT32_MAX;
12778}
12779#endif /* IEM_WITH_SETJMP */
12780
12781
12782/** @} */
12783
12784
12785
12786/*
12787 * Include the instructions
12788 */
12789#include "IEMAllInstructions.cpp.h"
12790
12791
12792
12793
12794#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12795
12796/**
12797 * Sets up execution verification mode.
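 *
 * When verification is active the instruction is executed by IEM on a private
 * copy of the guest context (s_DebugCtx) while its I/O, MMIO and RAM accesses
 * are recorded; iemExecVerificationModeCheck() afterwards re-runs the
 * instruction in HM or REM and compares the resulting register state and the
 * recorded events.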
12798 */
12799IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12800{
12802 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12803
12804 /*
12805 * Always note down the address of the current instruction.
12806 */
12807 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12808 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12809
12810 /*
12811 * Enable verification and/or logging.
12812 */
12813 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12814 if ( fNewNoRem
12815 && ( 0
12816#if 0 /* auto enable on first paged protected mode interrupt */
12817 || ( pOrgCtx->eflags.Bits.u1IF
12818 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12819 && TRPMHasTrap(pVCpu)
12820 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12821#endif
12822#if 0
12823 || ( pOrgCtx->cs.Sel == 0x10
12824 && ( pOrgCtx->rip == 0x90119e3e
12825 || pOrgCtx->rip == 0x901d9810))
12826#endif
12827#if 0 /* Auto enable DSL - FPU stuff. */
12828 || ( pOrgCtx->cs.Sel == 0x10
12829 && (// pOrgCtx->rip == 0xc02ec07f
12830 //|| pOrgCtx->rip == 0xc02ec082
12831 //|| pOrgCtx->rip == 0xc02ec0c9
12832 0
12833 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12834#endif
12835#if 0 /* Auto enable DSL - fstp st0 stuff. */
12836 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12837#endif
12838#if 0
12839 || pOrgCtx->rip == 0x9022bb3a
12840#endif
12841#if 0
12842 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12843#endif
12844#if 0
12845 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12846 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12847#endif
12848#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12849 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12850 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12851 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12852#endif
12853#if 0 /* NT4SP1 - xadd early boot. */
12854 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12855#endif
12856#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12857 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12858#endif
12859#if 0 /* NT4SP1 - cmpxchg (AMD). */
12860 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12861#endif
12862#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12863 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12864#endif
12865#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12866 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12867
12868#endif
12869#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12870 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12871
12872#endif
12873#if 0 /* NT4SP1 - frstor [ecx] */
12874 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12875#endif
12876#if 0 /* xxxxxx - All long mode code. */
12877 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12878#endif
12879#if 0 /* rep movsq linux 3.7 64-bit boot. */
12880 || (pOrgCtx->rip == 0x0000000000100241)
12881#endif
12882#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12883 || (pOrgCtx->rip == 0x000000000215e240)
12884#endif
12885#if 0 /* DOS's size-overridden iret to v8086. */
12886 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12887#endif
12888 )
12889 )
12890 {
12891 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12892 RTLogFlags(NULL, "enabled");
12893 fNewNoRem = false;
12894 }
12895 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12896 {
12897 pVCpu->iem.s.fNoRem = fNewNoRem;
12898 if (!fNewNoRem)
12899 {
12900 LogAlways(("Enabling verification mode!\n"));
12901 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12902 }
12903 else
12904 LogAlways(("Disabling verification mode!\n"));
12905 }
12906
12907 /*
12908 * Switch state.
12909 */
12910 if (IEM_VERIFICATION_ENABLED(pVCpu))
12911 {
12912 static CPUMCTX s_DebugCtx; /* Ugly! */
12913
12914 s_DebugCtx = *pOrgCtx;
12915 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12916 }
12917
12918 /*
12919 * See if there is an interrupt pending in TRPM and inject it if we can.
12920 */
12921 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12922 if ( pOrgCtx->eflags.Bits.u1IF
12923 && TRPMHasTrap(pVCpu)
12924 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12925 {
12926 uint8_t u8TrapNo;
12927 TRPMEVENT enmType;
12928 RTGCUINT uErrCode;
12929 RTGCPTR uCr2;
12930 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12931 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12932 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12933 TRPMResetTrap(pVCpu);
12934 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12935 }
12936
12937 /*
12938 * Reset the counters.
12939 */
12940 pVCpu->iem.s.cIOReads = 0;
12941 pVCpu->iem.s.cIOWrites = 0;
12942 pVCpu->iem.s.fIgnoreRaxRdx = false;
12943 pVCpu->iem.s.fOverlappingMovs = false;
12944 pVCpu->iem.s.fProblematicMemory = false;
12945 pVCpu->iem.s.fUndefinedEFlags = 0;
12946
12947 if (IEM_VERIFICATION_ENABLED(pVCpu))
12948 {
12949 /*
12950 * Free all verification records.
12951 */
12952 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12953 pVCpu->iem.s.pIemEvtRecHead = NULL;
12954 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12955 do
12956 {
12957 while (pEvtRec)
12958 {
12959 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12960 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12961 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12962 pEvtRec = pNext;
12963 }
12964 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12965 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12966 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12967 } while (pEvtRec);
12968 }
12969}
12970
12971
12972/**
12973 * Allocate an event record.
12974 * @returns Pointer to a record.
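 *          NULL if verification is disabled, if it is too early in the init
 *          sequence, or if the heap allocation fails.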
12975 */
12976IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12977{
12978 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12979 return NULL;
12980
12981 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12982 if (pEvtRec)
12983 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12984 else
12985 {
12986 if (!pVCpu->iem.s.ppIemEvtRecNext)
12987 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12988
12989 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12990 if (!pEvtRec)
12991 return NULL;
12992 }
12993 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12994 pEvtRec->pNext = NULL;
12995 return pEvtRec;
12996}
12997
12998
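/*
 * The IEMNotify* callbacks below record MMIO and I/O port accesses performed
 * outside IEM (by the other party during verification).  Each access is
 * queued on the pOtherEvtRecHead list so that iemExecVerificationModeCheck()
 * can match it against the events IEM itself recorded for the instruction.
 */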
12999/**
13000 * IOMMMIORead notification.
13001 */
13002VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13003{
13004 PVMCPU pVCpu = VMMGetCpu(pVM);
13005 if (!pVCpu)
13006 return;
13007 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13008 if (!pEvtRec)
13009 return;
13010 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13011 pEvtRec->u.RamRead.GCPhys = GCPhys;
13012 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13013 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13014 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13015}
13016
13017
13018/**
13019 * IOMMMIOWrite notification.
13020 */
13021VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13022{
13023 PVMCPU pVCpu = VMMGetCpu(pVM);
13024 if (!pVCpu)
13025 return;
13026 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13027 if (!pEvtRec)
13028 return;
13029 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13030 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13031 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13032 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13033 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13034 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13035 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13036 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13037 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13038}
13039
13040
13041/**
13042 * IOMIOPortRead notification.
13043 */
13044VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13045{
13046 PVMCPU pVCpu = VMMGetCpu(pVM);
13047 if (!pVCpu)
13048 return;
13049 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13050 if (!pEvtRec)
13051 return;
13052 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13053 pEvtRec->u.IOPortRead.Port = Port;
13054 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13055 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13056 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13057}
13058
13059/**
13060 * IOMIOPortWrite notification.
13061 */
13062VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13063{
13064 PVMCPU pVCpu = VMMGetCpu(pVM);
13065 if (!pVCpu)
13066 return;
13067 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13068 if (!pEvtRec)
13069 return;
13070 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13071 pEvtRec->u.IOPortWrite.Port = Port;
13072 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13073 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13074 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13075 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13076}
13077
13078
13079VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13080{
13081 PVMCPU pVCpu = VMMGetCpu(pVM);
13082 if (!pVCpu)
13083 return;
13084 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13085 if (!pEvtRec)
13086 return;
13087 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13088 pEvtRec->u.IOPortStrRead.Port = Port;
13089 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13090 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13091 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13092 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13093}
13094
13095
13096VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13097{
13098 PVMCPU pVCpu = VMMGetCpu(pVM);
13099 if (!pVCpu)
13100 return;
13101 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13102 if (!pEvtRec)
13103 return;
13104 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13105 pEvtRec->u.IOPortStrWrite.Port = Port;
13106 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13107 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13108 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13109 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13110}
13111
13112
13113/**
13114 * Fakes and records an I/O port read.
13115 *
13116 * @returns VINF_SUCCESS.
13117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13118 * @param Port The I/O port.
13119 * @param pu32Value Where to store the fake value.
13120 * @param cbValue The size of the access.
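 *
 * @remarks No device access is performed; the value is faked as 0xcccccccc
 *          and the access is merely recorded for the later comparison.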
13121 */
13122IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13123{
13124 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13125 if (pEvtRec)
13126 {
13127 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13128 pEvtRec->u.IOPortRead.Port = Port;
13129 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13130 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13131 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13132 }
13133 pVCpu->iem.s.cIOReads++;
13134 *pu32Value = 0xcccccccc;
13135 return VINF_SUCCESS;
13136}
13137
13138
13139/**
13140 * Fakes and records an I/O port write.
13141 *
13142 * @returns VINF_SUCCESS.
13143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13144 * @param Port The I/O port.
13145 * @param u32Value The value being written.
13146 * @param cbValue The size of the access.
13147 */
13148IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13149{
13150 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13151 if (pEvtRec)
13152 {
13153 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13154 pEvtRec->u.IOPortWrite.Port = Port;
13155 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13156 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13157 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13158 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13159 }
13160 pVCpu->iem.s.cIOWrites++;
13161 return VINF_SUCCESS;
13162}
13163
13164
13165/**
13166 * Used to add extra details about a stub case.
13167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13168 */
13169IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13170{
13171 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13172 PVM pVM = pVCpu->CTX_SUFF(pVM);
13174 char szRegs[4096];
13175 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13176 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13177 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13178 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13179 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13180 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13181 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13182 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13183 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13184 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13185 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13186 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13187 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13188 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13189 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13190 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13191 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13192 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13193 " efer=%016VR{efer}\n"
13194 " pat=%016VR{pat}\n"
13195 " sf_mask=%016VR{sf_mask}\n"
13196 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13197 " lstar=%016VR{lstar}\n"
13198 " star=%016VR{star} cstar=%016VR{cstar}\n"
13199 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13200 );
13201
13202 char szInstr1[256];
13203 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13204 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13205 szInstr1, sizeof(szInstr1), NULL);
13206 char szInstr2[256];
13207 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13208 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13209 szInstr2, sizeof(szInstr2), NULL);
13210
13211 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13212}
13213
13214
13215/**
13216 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13217 * dump to the assertion info.
13218 *
13219 * @param pEvtRec The record to dump.
13220 */
13221IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13222{
13223 switch (pEvtRec->enmEvent)
13224 {
13225 case IEMVERIFYEVENT_IOPORT_READ:
13226 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13227 pEvtRec->u.IOPortRead.Port,
13228 pEvtRec->u.IOPortRead.cbValue);
13229 break;
13230 case IEMVERIFYEVENT_IOPORT_WRITE:
13231 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13232 pEvtRec->u.IOPortWrite.Port,
13233 pEvtRec->u.IOPortWrite.cbValue,
13234 pEvtRec->u.IOPortWrite.u32Value);
13235 break;
13236 case IEMVERIFYEVENT_IOPORT_STR_READ:
13237 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13238 pEvtRec->u.IOPortStrRead.Port,
13239 pEvtRec->u.IOPortStrRead.cbValue,
13240 pEvtRec->u.IOPortStrRead.cTransfers);
13241 break;
13242 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13243 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13244 pEvtRec->u.IOPortStrWrite.Port,
13245 pEvtRec->u.IOPortStrWrite.cbValue,
13246 pEvtRec->u.IOPortStrWrite.cTransfers);
13247 break;
13248 case IEMVERIFYEVENT_RAM_READ:
13249 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13250 pEvtRec->u.RamRead.GCPhys,
13251 pEvtRec->u.RamRead.cb);
13252 break;
13253 case IEMVERIFYEVENT_RAM_WRITE:
13254 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13255 pEvtRec->u.RamWrite.GCPhys,
13256 pEvtRec->u.RamWrite.cb,
13257 (int)pEvtRec->u.RamWrite.cb,
13258 pEvtRec->u.RamWrite.ab);
13259 break;
13260 default:
13261 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13262 break;
13263 }
13264}
13265
13266
13267/**
13268 * Raises an assertion on the specified records, showing the given message with
13269 * dumps of both records attached.
13270 *
13271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13272 * @param pEvtRec1 The first record.
13273 * @param pEvtRec2 The second record.
13274 * @param pszMsg The message explaining why we're asserting.
13275 */
13276IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13277{
13278 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13279 iemVerifyAssertAddRecordDump(pEvtRec1);
13280 iemVerifyAssertAddRecordDump(pEvtRec2);
13281 iemVerifyAssertMsg2(pVCpu);
13282 RTAssertPanic();
13283}
13284
13285
13286/**
13287 * Raises an assertion on the specified record, showing the given message with
13288 * a record dump attached.
13289 *
13290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13291 * @param pEvtRec The record.
13292 * @param pszMsg The message explaining why we're asserting.
13293 */
13294IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13295{
13296 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13297 iemVerifyAssertAddRecordDump(pEvtRec);
13298 iemVerifyAssertMsg2(pVCpu);
13299 RTAssertPanic();
13300}
13301
13302
13303/**
13304 * Verifies a write record.
13305 *
13306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13307 * @param pEvtRec The write record.
13308 * @param fRem Set if REM performed the other execution; if clear
13309 * it was HM.
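 *
 * @remarks Mismatches are ignored for the cases filtered below: INS targets
 *          stubbed with the 0xcc dummy pattern, writes into the ROM/MMIO
 *          ranges at 0xA0000-0xFFFFF and 0xFFFC0000-0xFFFFFFFF, and 512-byte
 *          FXSAVE images.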
13310 */
13311IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13312{
13313 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13314 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13315 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13316 if ( RT_FAILURE(rc)
13317 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13318 {
13319 /* fend off ins */
13320 if ( !pVCpu->iem.s.cIOReads
13321 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13322 || ( pEvtRec->u.RamWrite.cb != 1
13323 && pEvtRec->u.RamWrite.cb != 2
13324 && pEvtRec->u.RamWrite.cb != 4) )
13325 {
13326 /* fend off ROMs and MMIO */
13327 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13328 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13329 {
13330 /* fend off fxsave */
13331 if (pEvtRec->u.RamWrite.cb != 512)
13332 {
13333 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13334 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13335 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13336 RTAssertMsg2Add("%s: %.*Rhxs\n"
13337 "iem: %.*Rhxs\n",
13338 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13339 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13340 iemVerifyAssertAddRecordDump(pEvtRec);
13341 iemVerifyAssertMsg2(pVCpu);
13342 RTAssertPanic();
13343 }
13344 }
13345 }
13346 }
13347
13348}
13349
13350/**
13351 * Performs the post-execution verification checks.
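 *
 * Switches back from the debug (IEM) context to the original one, re-executes
 * the instruction in HM (when possible) or REM, and then compares the
 * resulting register state and the recorded I/O, MMIO and RAM events,
 * raising assertions on any mismatch.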
13352 */
13353IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13354{
13355 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13356 return rcStrictIem;
13357
13358 /*
13359 * Switch back the state.
13360 */
13361 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13362 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13363 Assert(pOrgCtx != pDebugCtx);
13364 IEM_GET_CTX(pVCpu) = pOrgCtx;
13365
13366 /*
13367 * Execute the instruction in REM.
13368 */
13369 bool fRem = false;
13370 PVM pVM = pVCpu->CTX_SUFF(pVM);
13372 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13373#ifdef IEM_VERIFICATION_MODE_FULL_HM
13374 if ( HMIsEnabled(pVM)
13375 && pVCpu->iem.s.cIOReads == 0
13376 && pVCpu->iem.s.cIOWrites == 0
13377 && !pVCpu->iem.s.fProblematicMemory)
13378 {
13379 uint64_t uStartRip = pOrgCtx->rip;
13380 unsigned iLoops = 0;
13381 do
13382 {
13383 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13384 iLoops++;
13385 } while ( rc == VINF_SUCCESS
13386 || ( rc == VINF_EM_DBG_STEPPED
13387 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13388 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13389 || ( pOrgCtx->rip != pDebugCtx->rip
13390 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13391 && iLoops < 8) );
13392 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13393 rc = VINF_SUCCESS;
13394 }
13395#endif
13396 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13397 || rc == VINF_IOM_R3_IOPORT_READ
13398 || rc == VINF_IOM_R3_IOPORT_WRITE
13399 || rc == VINF_IOM_R3_MMIO_READ
13400 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13401 || rc == VINF_IOM_R3_MMIO_WRITE
13402 || rc == VINF_CPUM_R3_MSR_READ
13403 || rc == VINF_CPUM_R3_MSR_WRITE
13404 || rc == VINF_EM_RESCHEDULE
13405 )
13406 {
13407 EMRemLock(pVM);
13408 rc = REMR3EmulateInstruction(pVM, pVCpu);
13409 AssertRC(rc);
13410 EMRemUnlock(pVM);
13411 fRem = true;
13412 }
13413
13414# if 1 /* Skip unimplemented instructions for now. */
13415 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13416 {
13417 IEM_GET_CTX(pVCpu) = pOrgCtx;
13418 if (rc == VINF_EM_DBG_STEPPED)
13419 return VINF_SUCCESS;
13420 return rc;
13421 }
13422# endif
13423
13424 /*
13425 * Compare the register states.
13426 */
13427 unsigned cDiffs = 0;
13428 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13429 {
13430 //Log(("REM and IEM ends up with different registers!\n"));
13431 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13432
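 /* Comparison helpers: check a single context field (CHECK_FIELD), an
    extended-state field (CHECK_XSTATE_FIELD), a single flag bit
    (CHECK_BIT_FIELD) or a whole segment register (CHECK_SEL), logging the
    difference between the IEM state (pDebugCtx/pDebugXState) and the other
    party's state (pOrgCtx/pOrgXState) and bumping cDiffs. */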
13433# define CHECK_FIELD(a_Field) \
13434 do \
13435 { \
13436 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13437 { \
13438 switch (sizeof(pOrgCtx->a_Field)) \
13439 { \
13440 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13441 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13442 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13443 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13444 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13445 } \
13446 cDiffs++; \
13447 } \
13448 } while (0)
13449# define CHECK_XSTATE_FIELD(a_Field) \
13450 do \
13451 { \
13452 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13453 { \
13454 switch (sizeof(pOrgXState->a_Field)) \
13455 { \
13456 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13457 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13458 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13459 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13460 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13461 } \
13462 cDiffs++; \
13463 } \
13464 } while (0)
13465
13466# define CHECK_BIT_FIELD(a_Field) \
13467 do \
13468 { \
13469 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13470 { \
13471 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13472 cDiffs++; \
13473 } \
13474 } while (0)
13475
13476# define CHECK_SEL(a_Sel) \
13477 do \
13478 { \
13479 CHECK_FIELD(a_Sel.Sel); \
13480 CHECK_FIELD(a_Sel.Attr.u); \
13481 CHECK_FIELD(a_Sel.u64Base); \
13482 CHECK_FIELD(a_Sel.u32Limit); \
13483 CHECK_FIELD(a_Sel.fFlags); \
13484 } while (0)
13485
13486 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13487 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13488
13489#if 1 /* The recompiler doesn't update these the intel way. */
13490 if (fRem)
13491 {
13492 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13493 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13494 pOrgXState->x87.CS = pDebugXState->x87.CS;
13495 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13496 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13497 pOrgXState->x87.DS = pDebugXState->x87.DS;
13498 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13499 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13500 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13501 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13502 }
13503#endif
13504 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13505 {
13506 RTAssertMsg2Weak(" the FPU state differs\n");
13507 cDiffs++;
13508 CHECK_XSTATE_FIELD(x87.FCW);
13509 CHECK_XSTATE_FIELD(x87.FSW);
13510 CHECK_XSTATE_FIELD(x87.FTW);
13511 CHECK_XSTATE_FIELD(x87.FOP);
13512 CHECK_XSTATE_FIELD(x87.FPUIP);
13513 CHECK_XSTATE_FIELD(x87.CS);
13514 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13515 CHECK_XSTATE_FIELD(x87.FPUDP);
13516 CHECK_XSTATE_FIELD(x87.DS);
13517 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13518 CHECK_XSTATE_FIELD(x87.MXCSR);
13519 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13520 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13521 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13522 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13523 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13524 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13525 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13526 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13527 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13528 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13529 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13530 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13531 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13532 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13533 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13534 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13535 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13536 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13537 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13538 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13539 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13540 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13541 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13542 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13543 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13544 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13545 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13546 }
13547 CHECK_FIELD(rip);
13548 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13549 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13550 {
13551 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13552 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13553 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13554 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13555 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13556 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13557 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13558 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13559 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13560 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13561 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13562 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13563 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13564 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13565 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13566 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13567 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13568 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13569 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13570 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13571 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13572 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13573 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13574 }
13575
13576 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13577 CHECK_FIELD(rax);
13578 CHECK_FIELD(rcx);
13579 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13580 CHECK_FIELD(rdx);
13581 CHECK_FIELD(rbx);
13582 CHECK_FIELD(rsp);
13583 CHECK_FIELD(rbp);
13584 CHECK_FIELD(rsi);
13585 CHECK_FIELD(rdi);
13586 CHECK_FIELD(r8);
13587 CHECK_FIELD(r9);
13588 CHECK_FIELD(r10);
13589 CHECK_FIELD(r11);
13590 CHECK_FIELD(r12);
13591 CHECK_FIELD(r13);
13592 CHECK_SEL(cs);
13593 CHECK_SEL(ss);
13594 CHECK_SEL(ds);
13595 CHECK_SEL(es);
13596 CHECK_SEL(fs);
13597 CHECK_SEL(gs);
13598 CHECK_FIELD(cr0);
13599
13600 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13601 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13602 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
13603 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13604 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13605 {
13606 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13607 { /* ignore */ }
13608 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13609 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13610 && fRem)
13611 { /* ignore */ }
13612 else
13613 CHECK_FIELD(cr2);
13614 }
13615 CHECK_FIELD(cr3);
13616 CHECK_FIELD(cr4);
13617 CHECK_FIELD(dr[0]);
13618 CHECK_FIELD(dr[1]);
13619 CHECK_FIELD(dr[2]);
13620 CHECK_FIELD(dr[3]);
13621 CHECK_FIELD(dr[6]);
13622 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13623 CHECK_FIELD(dr[7]);
13624 CHECK_FIELD(gdtr.cbGdt);
13625 CHECK_FIELD(gdtr.pGdt);
13626 CHECK_FIELD(idtr.cbIdt);
13627 CHECK_FIELD(idtr.pIdt);
13628 CHECK_SEL(ldtr);
13629 CHECK_SEL(tr);
13630 CHECK_FIELD(SysEnter.cs);
13631 CHECK_FIELD(SysEnter.eip);
13632 CHECK_FIELD(SysEnter.esp);
13633 CHECK_FIELD(msrEFER);
13634 CHECK_FIELD(msrSTAR);
13635 CHECK_FIELD(msrPAT);
13636 CHECK_FIELD(msrLSTAR);
13637 CHECK_FIELD(msrCSTAR);
13638 CHECK_FIELD(msrSFMASK);
13639 CHECK_FIELD(msrKERNELGSBASE);
13640
13641 if (cDiffs != 0)
13642 {
13643 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13644 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13645 RTAssertPanic();
13646 static bool volatile s_fEnterDebugger = true;
13647 if (s_fEnterDebugger)
13648 DBGFSTOP(pVM);
13649
13650# if 1 /* Ignore unimplemented instructions for now. */
13651 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13652 rcStrictIem = VINF_SUCCESS;
13653# endif
13654 }
13655# undef CHECK_FIELD
13656# undef CHECK_BIT_FIELD
13657 }
13658
13659 /*
13660 * If the register state compared fine, check the verification event
13661 * records.
13662 */
13663 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13664 {
13665 /*
13666 * Compare verification event records.
13667 * - I/O port accesses should be a 1:1 match.
13668 */
13669 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13670 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13671 while (pIemRec && pOtherRec)
13672 {
13673 /* Since we might miss RAM writes and reads, ignore extra IEM read records and
13674 verify extra write records against what actually ended up in guest memory. */
13675 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13676 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13677 && pIemRec->pNext)
13678 {
13679 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13680 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13681 pIemRec = pIemRec->pNext;
13682 }
13683
13684 /* Do the compare. */
13685 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13686 {
13687 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13688 break;
13689 }
13690 bool fEquals;
13691 switch (pIemRec->enmEvent)
13692 {
13693 case IEMVERIFYEVENT_IOPORT_READ:
13694 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13695 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13696 break;
13697 case IEMVERIFYEVENT_IOPORT_WRITE:
13698 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13699 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13700 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13701 break;
13702 case IEMVERIFYEVENT_IOPORT_STR_READ:
13703 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13704 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13705 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13706 break;
13707 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13708 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13709 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13710 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13711 break;
13712 case IEMVERIFYEVENT_RAM_READ:
13713 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13714 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13715 break;
13716 case IEMVERIFYEVENT_RAM_WRITE:
13717 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13718 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13719 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13720 break;
13721 default:
13722 fEquals = false;
13723 break;
13724 }
13725 if (!fEquals)
13726 {
13727 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13728 break;
13729 }
13730
13731 /* advance */
13732 pIemRec = pIemRec->pNext;
13733 pOtherRec = pOtherRec->pNext;
13734 }
13735
13736 /* Ignore extra writes and reads. */
13737 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13738 {
13739 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13740 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13741 pIemRec = pIemRec->pNext;
13742 }
13743 if (pIemRec != NULL)
13744 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13745 else if (pOtherRec != NULL)
13746 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13747 }
13748 IEM_GET_CTX(pVCpu) = pOrgCtx;
13749
13750 return rcStrictIem;
13751}
13752
13753#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13754
13755/* stubs */
13756IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13757{
13758 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13759 return VERR_INTERNAL_ERROR;
13760}
13761
13762IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13763{
13764 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13765 return VERR_INTERNAL_ERROR;
13766}
13767
13768#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13769
13770
13771#ifdef LOG_ENABLED
13772/**
13773 * Logs the current instruction.
13774 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13775 * @param pCtx The current CPU context.
13776 * @param fSameCtx Set if we have the same context information as the VMM,
13777 * clear if we may have already executed an instruction in
13778 * our debug context. When clear, we assume IEMCPU holds
13779 * valid CPU mode info.
13780 */
13781IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13782{
13783# ifdef IN_RING3
13784 if (LogIs2Enabled())
13785 {
13786 char szInstr[256];
13787 uint32_t cbInstr = 0;
13788 if (fSameCtx)
13789 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13790 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13791 szInstr, sizeof(szInstr), &cbInstr);
13792 else
13793 {
13794 uint32_t fFlags = 0;
13795 switch (pVCpu->iem.s.enmCpuMode)
13796 {
13797 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13798 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13799 case IEMMODE_16BIT:
13800 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13801 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13802 else
13803 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13804 break;
13805 }
13806 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13807 szInstr, sizeof(szInstr), &cbInstr);
13808 }
13809
13810 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13811 Log2(("****\n"
13812 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13813 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13814 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13815 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13816 " %s\n"
13817 ,
13818 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13819 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13820 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13821 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13822 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13823 szInstr));
13824
13825 if (LogIs3Enabled())
13826 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13827 }
13828 else
13829# endif
13830 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13831 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13832 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13833}
13834#endif
13835
13836
13837/**
13838 * Makes status code adjustments (pass up from I/O and access handler)
13839 * as well as maintaining statistics.
13840 *
13841 * @returns Strict VBox status code to pass up.
13842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13843 * @param rcStrict The status from executing an instruction.
13844 */
13845DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13846{
13847 if (rcStrict != VINF_SUCCESS)
13848 {
13849 if (RT_SUCCESS(rcStrict))
13850 {
13851 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13852 || rcStrict == VINF_IOM_R3_IOPORT_READ
13853 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13854 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13855 || rcStrict == VINF_IOM_R3_MMIO_READ
13856 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13857 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13858 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13859 || rcStrict == VINF_CPUM_R3_MSR_READ
13860 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13861 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13862 || rcStrict == VINF_EM_RAW_TO_R3
13863 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13864 /* raw-mode / virt handlers only: */
13865 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13866 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13867 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13868 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13869 || rcStrict == VINF_SELM_SYNC_GDT
13870 || rcStrict == VINF_CSAM_PENDING_ACTION
13871 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13872 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13873/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13874 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13875 if (rcPassUp == VINF_SUCCESS)
13876 pVCpu->iem.s.cRetInfStatuses++;
13877 else if ( rcPassUp < VINF_EM_FIRST
13878 || rcPassUp > VINF_EM_LAST
13879 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13880 {
13881 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13882 pVCpu->iem.s.cRetPassUpStatus++;
13883 rcStrict = rcPassUp;
13884 }
13885 else
13886 {
13887 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13888 pVCpu->iem.s.cRetInfStatuses++;
13889 }
13890 }
13891 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13892 pVCpu->iem.s.cRetAspectNotImplemented++;
13893 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13894 pVCpu->iem.s.cRetInstrNotImplemented++;
13895#ifdef IEM_VERIFICATION_MODE_FULL
13896 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13897 rcStrict = VINF_SUCCESS;
13898#endif
13899 else
13900 pVCpu->iem.s.cRetErrStatuses++;
13901 }
13902 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13903 {
13904 pVCpu->iem.s.cRetPassUpStatus++;
13905 rcStrict = pVCpu->iem.s.rcPassUp;
13906 }
13907
13908 return rcStrict;
13909}
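
/* Illustrative summary (added note, not in the original source): the pass-up status
   recorded in pVCpu->iem.s.rcPassUp replaces an informational rcStrict when it
   either falls outside the VINF_EM_FIRST..VINF_EM_LAST scheduling range or is
   numerically lower (i.e. more urgent) than rcStrict; otherwise the original
   informational status is kept and only the statistics counters differ. */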
13910
13911
13912/**
13913 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13914 * IEMExecOneWithPrefetchedByPC.
13915 *
13916 * Similar code is found in IEMExecLots.
13917 *
13918 * @return Strict VBox status code.
13919 * @param pVCpu The cross context virtual CPU structure of the
13920 * calling thread.
13921 * @param fExecuteInhibit If set, execute the instruction following CLI,
13922 * POP SS and MOV SS,GR.
13923 */
13924DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13925{
13926#ifdef IEM_WITH_SETJMP
13927 VBOXSTRICTRC rcStrict;
13928 jmp_buf JmpBuf;
13929 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13930 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13931 if ((rcStrict = setjmp(JmpBuf)) == 0)
13932 {
13933 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13934 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13935 }
13936 else
13937 pVCpu->iem.s.cLongJumps++;
13938 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13939#else
13940 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13941 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13942#endif
13943 if (rcStrict == VINF_SUCCESS)
13944 pVCpu->iem.s.cInstructions++;
13945 if (pVCpu->iem.s.cActiveMappings > 0)
13946 {
13947 Assert(rcStrict != VINF_SUCCESS);
13948 iemMemRollback(pVCpu);
13949 }
13950//#ifdef DEBUG
13951// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13952//#endif
13953
13954 /* Execute the next instruction as well if a cli, pop ss or
13955 mov ss, Gr has just completed successfully. */
13956 if ( fExecuteInhibit
13957 && rcStrict == VINF_SUCCESS
13958 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13959 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13960 {
13961 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13962 if (rcStrict == VINF_SUCCESS)
13963 {
13964#ifdef LOG_ENABLED
13965 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13966#endif
13967#ifdef IEM_WITH_SETJMP
13968 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13969 if ((rcStrict = setjmp(JmpBuf)) == 0)
13970 {
13971 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13972 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13973 }
13974 else
13975 pVCpu->iem.s.cLongJumps++;
13976 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13977#else
13978 IEM_OPCODE_GET_NEXT_U8(&b);
13979 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13980#endif
13981 if (rcStrict == VINF_SUCCESS)
13982 pVCpu->iem.s.cInstructions++;
13983 if (pVCpu->iem.s.cActiveMappings > 0)
13984 {
13985 Assert(rcStrict != VINF_SUCCESS);
13986 iemMemRollback(pVCpu);
13987 }
13988 }
13989 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13990 }
13991
13992 /*
13993 * Return value fiddling, statistics and sanity assertions.
13994 */
13995 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13996
13997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13999#if defined(IEM_VERIFICATION_MODE_FULL)
14000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14004#endif
14005 return rcStrict;
14006}
14007
14008
14009#ifdef IN_RC
14010/**
14011 * Re-enters raw-mode or ensures we return to ring-3.
14012 *
14013 * @returns rcStrict, maybe modified.
14014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14015 * @param pCtx The current CPU context.
14016 * @param rcStrict The status code returned by the interpreter.
14017 */
14018DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14019{
14020 if ( !pVCpu->iem.s.fInPatchCode
14021 && ( rcStrict == VINF_SUCCESS
14022 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14023 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14024 {
14025 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14026 CPUMRawEnter(pVCpu);
14027 else
14028 {
14029 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14030 rcStrict = VINF_EM_RESCHEDULE;
14031 }
14032 }
14033 return rcStrict;
14034}
14035#endif
14036
14037
14038/**
14039 * Execute one instruction.
14040 *
14041 * @return Strict VBox status code.
14042 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14043 */
14044VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14045{
14046#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14047 if (++pVCpu->iem.s.cVerifyDepth == 1)
14048 iemExecVerificationModeSetup(pVCpu);
14049#endif
14050#ifdef LOG_ENABLED
14051 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14052 iemLogCurInstr(pVCpu, pCtx, true);
14053#endif
14054
14055 /*
14056 * Do the decoding and emulation.
14057 */
14058 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14059 if (rcStrict == VINF_SUCCESS)
14060 rcStrict = iemExecOneInner(pVCpu, true);
14061
14062#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14063 /*
14064 * Assert some sanity.
14065 */
14066 if (pVCpu->iem.s.cVerifyDepth == 1)
14067 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14068 pVCpu->iem.s.cVerifyDepth--;
14069#endif
14070#ifdef IN_RC
14071 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14072#endif
14073 if (rcStrict != VINF_SUCCESS)
14074 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14075 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14076 return rcStrict;
14077}
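
/*
 * Illustrative, non-compiled sketch (added for exposition, not part of the
 * original source): how a caller such as EM might drive IEMExecOne and react
 * to the strict status code. The helper name and the handling shown here are
 * assumptions made up for the example, not the actual EM logic.
 */
#if 0
static VBOXSTRICTRC emR3ExampleSingleStep(PVMCPU pVCpu) /* hypothetical helper */
{
    /* Interpret exactly one instruction (plus any inhibit-shadowed follow-up). */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    /* VINF_SUCCESS means the instruction completed; informational statuses must
       be propagated to the outer loop, while VERR_IEM_INSTR_NOT_IMPLEMENTED
       would typically be handed off to another execution engine. */
    if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
        LogFlow(("example: instruction not implemented by IEM\n"));
    return rcStrict;
}
#endif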
14078
14079
14080VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14081{
14082 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14083 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14084
14085 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14086 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14087 if (rcStrict == VINF_SUCCESS)
14088 {
14089 rcStrict = iemExecOneInner(pVCpu, true);
14090 if (pcbWritten)
14091 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14092 }
14093
14094#ifdef IN_RC
14095 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14096#endif
14097 return rcStrict;
14098}
14099
14100
14101VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14102 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14103{
14104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14105 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14106
14107 VBOXSTRICTRC rcStrict;
14108 if ( cbOpcodeBytes
14109 && pCtx->rip == OpcodeBytesPC)
14110 {
14111 iemInitDecoder(pVCpu, false);
14112#ifdef IEM_WITH_CODE_TLB
14113 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14114 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14115 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14116 pVCpu->iem.s.offCurInstrStart = 0;
14117 pVCpu->iem.s.offInstrNextByte = 0;
14118#else
14119 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14120 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14121#endif
14122 rcStrict = VINF_SUCCESS;
14123 }
14124 else
14125 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14126 if (rcStrict == VINF_SUCCESS)
14127 {
14128 rcStrict = iemExecOneInner(pVCpu, true);
14129 }
14130
14131#ifdef IN_RC
14132 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14133#endif
14134 return rcStrict;
14135}
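
/*
 * Illustrative, non-compiled sketch (added note): feeding already-fetched
 * opcode bytes to IEMExecOneWithPrefetchedByPC so the decoder can skip the
 * guest-memory prefetch when RIP still matches. The opcode buffer and the
 * calling context are assumptions made up for the example.
 */
#if 0
static VBOXSTRICTRC exampleExecPrefetched(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    /* Hypothetical byte captured earlier by an exit handler: push ebp (0x55). */
    static const uint8_t s_abOpcode[] = { 0x55 };
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore,
                                        IEM_GET_CTX(pVCpu)->rip, /* must equal the PC the bytes were read from */
                                        s_abOpcode, sizeof(s_abOpcode));
}
#endif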
14136
14137
14138VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14139{
14140 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14141 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14142
14143 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14144 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14145 if (rcStrict == VINF_SUCCESS)
14146 {
14147 rcStrict = iemExecOneInner(pVCpu, false);
14148 if (pcbWritten)
14149 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14150 }
14151
14152#ifdef IN_RC
14153 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14154#endif
14155 return rcStrict;
14156}
14157
14158
14159VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14160 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14161{
14162 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14163 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14164
14165 VBOXSTRICTRC rcStrict;
14166 if ( cbOpcodeBytes
14167 && pCtx->rip == OpcodeBytesPC)
14168 {
14169 iemInitDecoder(pVCpu, true);
14170#ifdef IEM_WITH_CODE_TLB
14171 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14172 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14173 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14174 pVCpu->iem.s.offCurInstrStart = 0;
14175 pVCpu->iem.s.offInstrNextByte = 0;
14176#else
14177 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14178 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14179#endif
14180 rcStrict = VINF_SUCCESS;
14181 }
14182 else
14183 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14184 if (rcStrict == VINF_SUCCESS)
14185 rcStrict = iemExecOneInner(pVCpu, false);
14186
14187#ifdef IN_RC
14188 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14189#endif
14190 return rcStrict;
14191}
14192
14193
14194/**
14195 * For debugging DISGetParamSize, this may come in handy.
14196 *
14197 * @returns Strict VBox status code.
14198 * @param pVCpu The cross context virtual CPU structure of the
14199 * calling EMT.
14200 * @param pCtxCore The context core structure.
14201 * @param OpcodeBytesPC The PC of the opcode bytes.
14202 * @param pvOpcodeBytes Prefetched opcode bytes.
14203 * @param cbOpcodeBytes Number of prefetched bytes.
14204 * @param pcbWritten Where to return the number of bytes written.
14205 * Optional.
14206 */
14207VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14208 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14209 uint32_t *pcbWritten)
14210{
14211 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14212 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14213
14214 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14215 VBOXSTRICTRC rcStrict;
14216 if ( cbOpcodeBytes
14217 && pCtx->rip == OpcodeBytesPC)
14218 {
14219 iemInitDecoder(pVCpu, true);
14220#ifdef IEM_WITH_CODE_TLB
14221 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14222 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14223 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14224 pVCpu->iem.s.offCurInstrStart = 0;
14225 pVCpu->iem.s.offInstrNextByte = 0;
14226#else
14227 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14228 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14229#endif
14230 rcStrict = VINF_SUCCESS;
14231 }
14232 else
14233 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14234 if (rcStrict == VINF_SUCCESS)
14235 {
14236 rcStrict = iemExecOneInner(pVCpu, false);
14237 if (pcbWritten)
14238 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14239 }
14240
14241#ifdef IN_RC
14242 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14243#endif
14244 return rcStrict;
14245}
14246
14247
14248VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14249{
14250 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14251
14252#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14253 /*
14254 * See if there is an interrupt pending in TRPM, inject it if we can.
14255 */
14256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14257# ifdef IEM_VERIFICATION_MODE_FULL
14258 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14259# endif
14260 if ( pCtx->eflags.Bits.u1IF
14261 && TRPMHasTrap(pVCpu)
14262 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14263 {
14264 uint8_t u8TrapNo;
14265 TRPMEVENT enmType;
14266 RTGCUINT uErrCode;
14267 RTGCPTR uCr2;
14268 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14269 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14270 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14271 TRPMResetTrap(pVCpu);
14272 }
14273
14274 /*
14275 * Log the state.
14276 */
14277# ifdef LOG_ENABLED
14278 iemLogCurInstr(pVCpu, pCtx, true);
14279# endif
14280
14281 /*
14282 * Do the decoding and emulation.
14283 */
14284 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14285 if (rcStrict == VINF_SUCCESS)
14286 rcStrict = iemExecOneInner(pVCpu, true);
14287
14288 /*
14289 * Assert some sanity.
14290 */
14291 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14292
14293 /*
14294 * Log and return.
14295 */
14296 if (rcStrict != VINF_SUCCESS)
14297 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14298 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14299 if (pcInstructions)
14300 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14301 return rcStrict;
14302
14303#else /* Not verification mode */
14304
14305 /*
14306 * See if there is an interrupt pending in TRPM, inject it if we can.
14307 */
14308 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14309# ifdef IEM_VERIFICATION_MODE_FULL
14310 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14311# endif
14312 if ( pCtx->eflags.Bits.u1IF
14313 && TRPMHasTrap(pVCpu)
14314 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14315 {
14316 uint8_t u8TrapNo;
14317 TRPMEVENT enmType;
14318 RTGCUINT uErrCode;
14319 RTGCPTR uCr2;
14320 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14321 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14322 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14323 TRPMResetTrap(pVCpu);
14324 }
14325
14326 /*
14327 * Initial decoder init w/ prefetch, then setup setjmp.
14328 */
14329 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14330 if (rcStrict == VINF_SUCCESS)
14331 {
14332# ifdef IEM_WITH_SETJMP
14333 jmp_buf JmpBuf;
14334 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14335 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14336 pVCpu->iem.s.cActiveMappings = 0;
14337 if ((rcStrict = setjmp(JmpBuf)) == 0)
14338# endif
14339 {
14340 /*
14341 * The run loop. We limit ourselves to 4096 instructions right now.
14342 */
14343 PVM pVM = pVCpu->CTX_SUFF(pVM);
14344 uint32_t cInstr = 4096;
14345 for (;;)
14346 {
14347 /*
14348 * Log the state.
14349 */
14350# ifdef LOG_ENABLED
14351 iemLogCurInstr(pVCpu, pCtx, true);
14352# endif
14353
14354 /*
14355 * Do the decoding and emulation.
14356 */
14357 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14358 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14359 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14360 {
14361 Assert(pVCpu->iem.s.cActiveMappings == 0);
14362 pVCpu->iem.s.cInstructions++;
14363 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14364 {
14365 uint32_t fCpu = pVCpu->fLocalForcedActions
14366 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14367 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14368 | VMCPU_FF_TLB_FLUSH
14369# ifdef VBOX_WITH_RAW_MODE
14370 | VMCPU_FF_TRPM_SYNC_IDT
14371 | VMCPU_FF_SELM_SYNC_TSS
14372 | VMCPU_FF_SELM_SYNC_GDT
14373 | VMCPU_FF_SELM_SYNC_LDT
14374# endif
14375 | VMCPU_FF_INHIBIT_INTERRUPTS
14376 | VMCPU_FF_BLOCK_NMIS
14377 | VMCPU_FF_UNHALT ));
14378
14379 if (RT_LIKELY( ( !fCpu
14380 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14381 && !pCtx->rflags.Bits.u1IF) )
14382 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14383 {
14384 if (cInstr-- > 0)
14385 {
14386 Assert(pVCpu->iem.s.cActiveMappings == 0);
14387 iemReInitDecoder(pVCpu);
14388 continue;
14389 }
14390 }
14391 }
14392 Assert(pVCpu->iem.s.cActiveMappings == 0);
14393 }
14394 else if (pVCpu->iem.s.cActiveMappings > 0)
14395 iemMemRollback(pVCpu);
14396 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14397 break;
14398 }
14399 }
14400# ifdef IEM_WITH_SETJMP
14401 else
14402 {
14403 if (pVCpu->iem.s.cActiveMappings > 0)
14404 iemMemRollback(pVCpu);
14405 pVCpu->iem.s.cLongJumps++;
14406 }
14407 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14408# endif
14409
14410 /*
14411 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14412 */
14413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14414 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14415# if defined(IEM_VERIFICATION_MODE_FULL)
14416 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14417 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14418 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14419 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14420# endif
14421 }
14422
14423 /*
14424 * Maybe re-enter raw-mode and log.
14425 */
14426# ifdef IN_RC
14427 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14428# endif
14429 if (rcStrict != VINF_SUCCESS)
14430 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14431 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14432 if (pcInstructions)
14433 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14434 return rcStrict;
14435#endif /* Not verification mode */
14436}
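
/*
 * Illustrative, non-compiled sketch (added note): a caller batching work
 * through IEMExecLots and reading back how many instructions were retired.
 * The helper name and logging policy are assumptions for the example only.
 */
#if 0
static VBOXSTRICTRC exampleExecBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("example: IEMExecLots retired %u instructions, rc=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif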
14437
14438
14439
14440/**
14441 * Injects a trap, fault, abort, software interrupt or external interrupt.
14442 *
14443 * The parameter list matches TRPMQueryTrapAll pretty closely.
14444 *
14445 * @returns Strict VBox status code.
14446 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14447 * @param u8TrapNo The trap number.
14448 * @param enmType What type is it (trap/fault/abort), software
14449 * interrupt or hardware interrupt.
14450 * @param uErrCode The error code if applicable.
14451 * @param uCr2 The CR2 value if applicable.
14452 * @param cbInstr The instruction length (only relevant for
14453 * software interrupts).
14454 */
14455VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14456 uint8_t cbInstr)
14457{
14458 iemInitDecoder(pVCpu, false);
14459#ifdef DBGFTRACE_ENABLED
14460 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14461 u8TrapNo, enmType, uErrCode, uCr2);
14462#endif
14463
14464 uint32_t fFlags;
14465 switch (enmType)
14466 {
14467 case TRPM_HARDWARE_INT:
14468 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14469 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14470 uErrCode = uCr2 = 0;
14471 break;
14472
14473 case TRPM_SOFTWARE_INT:
14474 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14475 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14476 uErrCode = uCr2 = 0;
14477 break;
14478
14479 case TRPM_TRAP:
14480 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14481 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14482 if (u8TrapNo == X86_XCPT_PF)
14483 fFlags |= IEM_XCPT_FLAGS_CR2;
14484 switch (u8TrapNo)
14485 {
14486 case X86_XCPT_DF:
14487 case X86_XCPT_TS:
14488 case X86_XCPT_NP:
14489 case X86_XCPT_SS:
14490 case X86_XCPT_PF:
14491 case X86_XCPT_AC:
14492 fFlags |= IEM_XCPT_FLAGS_ERR;
14493 break;
14494
14495 case X86_XCPT_NMI:
14496 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14497 break;
14498 }
14499 break;
14500
14501 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14502 }
14503
14504 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14505}
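
/*
 * Illustrative, non-compiled sketch (added note): injecting a page fault the
 * way a caller that already has the fault details might do it. The error code
 * and faulting address are made-up example values, not taken from real state.
 */
#if 0
static VBOXSTRICTRC exampleInjectPageFault(PVMCPU pVCpu)
{
    uint16_t const uErrCode = 0x0002;      /* example: write access to a not-present page */
    RTGCPTR  const uCr2     = 0x00001000;  /* example faulting address */
    /* cbInstr is only relevant for software interrupts, so 0 is fine here. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0 /*cbInstr*/);
}
#endif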
14506
14507
14508/**
14509 * Injects the active TRPM event.
14510 *
14511 * @returns Strict VBox status code.
14512 * @param pVCpu The cross context virtual CPU structure.
14513 */
14514VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14515{
14516#ifndef IEM_IMPLEMENTS_TASKSWITCH
14517 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14518#else
14519 uint8_t u8TrapNo;
14520 TRPMEVENT enmType;
14521 RTGCUINT uErrCode;
14522 RTGCUINTPTR uCr2;
14523 uint8_t cbInstr;
14524 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14525 if (RT_FAILURE(rc))
14526 return rc;
14527
14528 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14529
14530 /** @todo Are there any other codes that imply the event was successfully
14531 * delivered to the guest? See @bugref{6607}. */
14532 if ( rcStrict == VINF_SUCCESS
14533 || rcStrict == VINF_IEM_RAISED_XCPT)
14534 {
14535 TRPMResetTrap(pVCpu);
14536 }
14537 return rcStrict;
14538#endif
14539}
14540
14541
14542VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14543{
14544 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14545 return VERR_NOT_IMPLEMENTED;
14546}
14547
14548
14549VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14550{
14551 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14552 return VERR_NOT_IMPLEMENTED;
14553}
14554
14555
14556#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14557/**
14558 * Executes a IRET instruction with default operand size.
14559 *
14560 * This is for PATM.
14561 *
14562 * @returns VBox status code.
14563 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14564 * @param pCtxCore The register frame.
14565 */
14566VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14567{
14568 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14569
14570 iemCtxCoreToCtx(pCtx, pCtxCore);
14571 iemInitDecoder(pVCpu);
14572 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14573 if (rcStrict == VINF_SUCCESS)
14574 iemCtxToCtxCore(pCtxCore, pCtx);
14575 else
14576 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14577 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14578 return rcStrict;
14579}
14580#endif
14581
14582
14583/**
14584 * Macro used by the IEMExec* methods to check the given instruction length.
14585 *
14586 * Will return on failure!
14587 *
14588 * @param a_cbInstr The given instruction length.
14589 * @param a_cbMin The minimum length.
14590 */
14591#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14592 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14593 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14594
14595
14596/**
14597 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14598 *
14599 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14600 *
14601 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14603 * @param rcStrict The status code to fiddle.
14604 */
14605DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14606{
14607 iemUninitExec(pVCpu);
14608#ifdef IN_RC
14609 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14610 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14611#else
14612 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14613#endif
14614}
14615
14616
14617/**
14618 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14619 *
14620 * This API ASSUMES that the caller has already verified that the guest code is
14621 * allowed to access the I/O port. (The I/O port is in the DX register in the
14622 * guest state.)
14623 *
14624 * @returns Strict VBox status code.
14625 * @param pVCpu The cross context virtual CPU structure.
14626 * @param cbValue The size of the I/O port access (1, 2, or 4).
14627 * @param enmAddrMode The addressing mode.
14628 * @param fRepPrefix Indicates whether a repeat prefix is used
14629 * (doesn't matter which for this instruction).
14630 * @param cbInstr The instruction length in bytes.
14631 * @param iEffSeg The effective segment register index (X86_SREG_XXX).
14632 * @param fIoChecked Whether the access to the I/O port has been
14633 * checked or not. It's typically checked in the
14634 * HM scenario.
14635 */
14636VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14637 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14638{
14639 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14640 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14641
14642 /*
14643 * State init.
14644 */
14645 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14646
14647 /*
14648 * Switch orgy for getting to the right handler.
14649 */
14650 VBOXSTRICTRC rcStrict;
14651 if (fRepPrefix)
14652 {
14653 switch (enmAddrMode)
14654 {
14655 case IEMMODE_16BIT:
14656 switch (cbValue)
14657 {
14658 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14659 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14660 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14661 default:
14662 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14663 }
14664 break;
14665
14666 case IEMMODE_32BIT:
14667 switch (cbValue)
14668 {
14669 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14670 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14671 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14672 default:
14673 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14674 }
14675 break;
14676
14677 case IEMMODE_64BIT:
14678 switch (cbValue)
14679 {
14680 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14681 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14682 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14683 default:
14684 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14685 }
14686 break;
14687
14688 default:
14689 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14690 }
14691 }
14692 else
14693 {
14694 switch (enmAddrMode)
14695 {
14696 case IEMMODE_16BIT:
14697 switch (cbValue)
14698 {
14699 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14700 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14701 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14702 default:
14703 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14704 }
14705 break;
14706
14707 case IEMMODE_32BIT:
14708 switch (cbValue)
14709 {
14710 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14711 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14712 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14713 default:
14714 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14715 }
14716 break;
14717
14718 case IEMMODE_64BIT:
14719 switch (cbValue)
14720 {
14721 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14722 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14723 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14724 default:
14725 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14726 }
14727 break;
14728
14729 default:
14730 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14731 }
14732 }
14733
14734 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14735}
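
/*
 * Illustrative, non-compiled sketch (added note): how an HM exit handler that
 * has already validated I/O permissions might forward a "rep outsb" to IEM.
 * The operand size, address mode and segment shown here are assumed to come
 * from the caller's exit-information decoding; only the IEMExecStringIoWrite
 * call itself reflects the interface above.
 */
#if 0
static VBOXSTRICTRC exampleHandleOutsExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue: byte-sized OUTSB*/,
                                IEMMODE_32BIT /*enmAddrMode: assumed from the exit info*/,
                                true /*fRepPrefix*/,
                                cbInstr,
                                X86_SREG_DS /*iEffSeg: default segment for OUTS*/,
                                true /*fIoChecked: HM already checked the I/O bitmap*/);
}
#endif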
14736
14737
14738/**
14739 * Interface for HM and EM for executing string I/O IN (read) instructions.
14740 *
14741 * This API ASSUMES that the caller has already verified that the guest code is
14742 * allowed to access the I/O port. (The I/O port is in the DX register in the
14743 * guest state.)
14744 *
14745 * @returns Strict VBox status code.
14746 * @param pVCpu The cross context virtual CPU structure.
14747 * @param cbValue The size of the I/O port access (1, 2, or 4).
14748 * @param enmAddrMode The addressing mode.
14749 * @param fRepPrefix Indicates whether a repeat prefix is used
14750 * (doesn't matter which for this instruction).
14751 * @param cbInstr The instruction length in bytes.
14752 * @param fIoChecked Whether the access to the I/O port has been
14753 * checked or not. It's typically checked in the
14754 * HM scenario.
14755 */
14756VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14757 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14758{
14759 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14760
14761 /*
14762 * State init.
14763 */
14764 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14765
14766 /*
14767 * Switch orgy for getting to the right handler.
14768 */
14769 VBOXSTRICTRC rcStrict;
14770 if (fRepPrefix)
14771 {
14772 switch (enmAddrMode)
14773 {
14774 case IEMMODE_16BIT:
14775 switch (cbValue)
14776 {
14777 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14778 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14779 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14780 default:
14781 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14782 }
14783 break;
14784
14785 case IEMMODE_32BIT:
14786 switch (cbValue)
14787 {
14788 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14789 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14790 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14791 default:
14792 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14793 }
14794 break;
14795
14796 case IEMMODE_64BIT:
14797 switch (cbValue)
14798 {
14799 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14800 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14801 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14802 default:
14803 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14804 }
14805 break;
14806
14807 default:
14808 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14809 }
14810 }
14811 else
14812 {
14813 switch (enmAddrMode)
14814 {
14815 case IEMMODE_16BIT:
14816 switch (cbValue)
14817 {
14818 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14819 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14820 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14821 default:
14822 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14823 }
14824 break;
14825
14826 case IEMMODE_32BIT:
14827 switch (cbValue)
14828 {
14829 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14830 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14831 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14832 default:
14833 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14834 }
14835 break;
14836
14837 case IEMMODE_64BIT:
14838 switch (cbValue)
14839 {
14840 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14841 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14842 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14843 default:
14844 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14845 }
14846 break;
14847
14848 default:
14849 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14850 }
14851 }
14852
14853 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14854}
14855
14856
14857/**
14858 * Interface for rawmode to execute an OUT instruction.
14859 *
14860 * @returns Strict VBox status code.
14861 * @param pVCpu The cross context virtual CPU structure.
14862 * @param cbInstr The instruction length in bytes.
14863 * @param u16Port The port to write to.
14864 * @param cbReg The register size.
14865 *
14866 * @remarks In ring-0 not all of the state needs to be synced in.
14867 */
14868VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14869{
14870 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14871 Assert(cbReg <= 4 && cbReg != 3);
14872
14873 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14874 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14875 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14876}
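
/*
 * Illustrative, non-compiled sketch (added note): a raw-mode caller emulating
 * "out dx, al" via the decoded-instruction interface. The port number is an
 * example value assumed to have been read from the guest's DX already; the
 * matching IEMExecDecodedIn call is used the same way.
 */
#if 0
static VBOXSTRICTRC exampleDecodedOut(PVMCPU pVCpu)
{
    /* "out dx, al": one opcode byte, byte-sized access. */
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port: example*/, 1 /*cbReg*/);
}
#endif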
14877
14878
14879/**
14880 * Interface for rawmode to execute an IN instruction.
14881 *
14882 * @returns Strict VBox status code.
14883 * @param pVCpu The cross context virtual CPU structure.
14884 * @param cbInstr The instruction length in bytes.
14885 * @param u16Port The port to read.
14886 * @param cbReg The register size.
14887 */
14888VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14889{
14890 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14891 Assert(cbReg <= 4 && cbReg != 3);
14892
14893 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14894 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14895 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14896}
14897
14898
14899/**
14900 * Interface for HM and EM to write to a CRx register.
14901 *
14902 * @returns Strict VBox status code.
14903 * @param pVCpu The cross context virtual CPU structure.
14904 * @param cbInstr The instruction length in bytes.
14905 * @param iCrReg The control register number (destination).
14906 * @param iGReg The general purpose register number (source).
14907 *
14908 * @remarks In ring-0 not all of the state needs to be synced in.
14909 */
14910VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14911{
14912 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14913 Assert(iCrReg < 16);
14914 Assert(iGReg < 16);
14915
14916 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14917 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14918 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14919}
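
/*
 * Illustrative, non-compiled sketch (added note): emulating "mov cr3, rax"
 * through the decoded-CRx-write interface. The register numbers are example
 * values assumed to have come from the caller's own instruction decoding.
 */
#if 0
static VBOXSTRICTRC exampleMovToCr3(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg: CR3*/, 0 /*iGReg: RAX*/);
}
#endif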
14920
14921
14922/**
14923 * Interface for HM and EM to read from a CRx register.
14924 *
14925 * @returns Strict VBox status code.
14926 * @param pVCpu The cross context virtual CPU structure.
14927 * @param cbInstr The instruction length in bytes.
14928 * @param iGReg The general purpose register number (destination).
14929 * @param iCrReg The control register number (source).
14930 *
14931 * @remarks In ring-0 not all of the state needs to be synced in.
14932 */
14933VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14934{
14935 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14936 Assert(iCrReg < 16);
14937 Assert(iGReg < 16);
14938
14939 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14940 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14941 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14942}
14943
14944
14945/**
14946 * Interface for HM and EM to clear the CR0[TS] bit.
14947 *
14948 * @returns Strict VBox status code.
14949 * @param pVCpu The cross context virtual CPU structure.
14950 * @param cbInstr The instruction length in bytes.
14951 *
14952 * @remarks In ring-0 not all of the state needs to be synced in.
14953 */
14954VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14955{
14956 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14957
14958 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14959 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14960 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14961}
14962
14963
14964/**
14965 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14966 *
14967 * @returns Strict VBox status code.
14968 * @param pVCpu The cross context virtual CPU structure.
14969 * @param cbInstr The instruction length in bytes.
14970 * @param uValue The value to load into CR0.
14971 *
14972 * @remarks In ring-0 not all of the state needs to be synced in.
14973 */
14974VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14975{
14976 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14977
14978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14979 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14980 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14981}
14982
14983
14984/**
14985 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14986 *
14987 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14988 *
14989 * @returns Strict VBox status code.
14990 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14991 * @param cbInstr The instruction length in bytes.
14992 * @remarks In ring-0 not all of the state needs to be synced in.
14993 * @thread EMT(pVCpu)
14994 */
14995VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14996{
14997 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14998
14999 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15000 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15001 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15002}
15003
15004
15005#ifdef VBOX_WITH_NESTED_HWVIRT
15006/**
15007 * Checks if IEM is in the process of delivering an event (interrupt or
15008 * exception).
15009 *
15010 * @returns true if it's raising an interrupt or exception, false otherwise.
15011 * @param pVCpu The cross context virtual CPU structure.
15012 */
15013VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
15014{
15015 return pVCpu->iem.s.cXcptRecursions > 0;
15016}
15017
15018
15019/**
15020 * Interface for HM and EM to emulate the CLGI instruction.
15021 *
15022 * @returns Strict VBox status code.
15023 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15024 * @param cbInstr The instruction length in bytes.
15025 * @thread EMT(pVCpu)
15026 */
15027VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15028{
15029 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15030
15031 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15032 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15033 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15034}
15035
15036
15037/**
15038 * Interface for HM and EM to emulate the STGI instruction.
15039 *
15040 * @returns Strict VBox status code.
15041 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15042 * @param cbInstr The instruction length in bytes.
15043 * @thread EMT(pVCpu)
15044 */
15045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15046{
15047 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15048
15049 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15050 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15052}
15053
15054
15055/**
15056 * Interface for HM and EM to emulate the VMLOAD instruction.
15057 *
15058 * @returns Strict VBox status code.
15059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15060 * @param cbInstr The instruction length in bytes.
15061 * @thread EMT(pVCpu)
15062 */
15063VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15064{
15065 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15066
15067 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15068 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15069 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15070}
15071
15072
15073/**
15074 * Interface for HM and EM to emulate the VMSAVE instruction.
15075 *
15076 * @returns Strict VBox status code.
15077 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15078 * @param cbInstr The instruction length in bytes.
15079 * @thread EMT(pVCpu)
15080 */
15081VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15082{
15083 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15084
15085 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15086 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15087 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15088}
15089
15090
15091/**
15092 * Interface for HM and EM to emulate the INVLPGA instruction.
15093 *
15094 * @returns Strict VBox status code.
15095 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15096 * @param cbInstr The instruction length in bytes.
15097 * @thread EMT(pVCpu)
15098 */
15099VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15100{
15101 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15102
15103 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15104 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15106}
15107#endif /* VBOX_WITH_NESTED_HWVIRT */
15108
15109#ifdef IN_RING3
15110
15111/**
15112 * Handles the unlikely and probably fatal merge cases.
15113 *
15114 * @returns Merged status code.
15115 * @param rcStrict Current EM status code.
15116 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15117 * with @a rcStrict.
15118 * @param iMemMap The memory mapping index. For error reporting only.
15119 * @param pVCpu The cross context virtual CPU structure of the calling
15120 * thread, for error reporting only.
15121 */
15122DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15123 unsigned iMemMap, PVMCPU pVCpu)
15124{
15125 if (RT_FAILURE_NP(rcStrict))
15126 return rcStrict;
15127
15128 if (RT_FAILURE_NP(rcStrictCommit))
15129 return rcStrictCommit;
15130
15131 if (rcStrict == rcStrictCommit)
15132 return rcStrictCommit;
15133
15134 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15135 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15136 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15137 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15138 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15139 return VERR_IOM_FF_STATUS_IPE;
15140}
15141
15142
15143/**
15144 * Helper for IOMR3ProcessForceFlag.
15145 *
15146 * @returns Merged status code.
15147 * @param rcStrict Current EM status code.
15148 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15149 * with @a rcStrict.
15150 * @param iMemMap The memory mapping index. For error reporting only.
15151 * @param pVCpu The cross context virtual CPU structure of the calling
15152 * thread, for error reporting only.
15153 */
15154DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15155{
15156 /* Simple. */
15157 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15158 return rcStrictCommit;
15159
15160 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15161 return rcStrict;
15162
15163 /* EM scheduling status codes. */
15164 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15165 && rcStrict <= VINF_EM_LAST))
15166 {
15167 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15168 && rcStrictCommit <= VINF_EM_LAST))
15169 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15170 }
15171
15172 /* Unlikely */
15173 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15174}
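
/* Added note: a couple of concrete outcomes of the merge above. When the
   current status is VINF_SUCCESS, the commit status (e.g. VINF_EM_RAW_TO_R3)
   is taken over directly; when both are EM scheduling codes, the numerically
   smaller (conventionally the more urgent) one wins; anything else falls
   through to iemR3MergeStatusSlow above. */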
15175
15176
15177/**
15178 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15179 *
15180 * @returns Merge between @a rcStrict and what the commit operation returned.
15181 * @param pVM The cross context VM structure.
15182 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15183 * @param rcStrict The status code returned by ring-0 or raw-mode.
15184 */
15185VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15186{
15187 /*
15188 * Reset the pending commit.
15189 */
15190 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15191 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15192 ("%#x %#x %#x\n",
15193 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15194 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15195
15196 /*
15197 * Commit the pending bounce buffers (usually just one).
15198 */
15199 unsigned cBufs = 0;
15200 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15201 while (iMemMap-- > 0)
15202 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15203 {
15204 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15205 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15206 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15207
15208 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15209 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15210 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15211
15212 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15213 {
15214 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15216 pbBuf,
15217 cbFirst,
15218 PGMACCESSORIGIN_IEM);
15219 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15220 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15221 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15222 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15223 }
15224
15225 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15226 {
15227 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15229 pbBuf + cbFirst,
15230 cbSecond,
15231 PGMACCESSORIGIN_IEM);
15232 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15233 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15234 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15235 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15236 }
15237 cBufs++;
15238 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15239 }
15240
15241 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15242 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15243 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15244 pVCpu->iem.s.cActiveMappings = 0;
15245 return rcStrict;
15246}
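
/*
 * Illustrative, non-compiled sketch (added note): how a ring-3 force-flag
 * processing loop might invoke IEMR3ProcessForceFlag when VMCPU_FF_IEM is
 * pending. The surrounding check and helper name are assumptions for the
 * example only.
 */
#if 0
static VBOXSTRICTRC exampleProcessIemFF(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif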
15247
15248#endif /* IN_RING3 */
15249