VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 66000

Last change on this file since 66000 was 66000, checked in by vboxsync, 8 years ago:

VMM: Nested Hw.virt: Preps for SVM vmrun/#VMEXIT impl.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 586.6 KB
1/* $Id: IEMAll.cpp 66000 2017-03-08 20:29:40Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too much
37 * and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
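/* Illustrative sketch (not from the original source): how the levels above are typically
 * exercised once LOG_GROUP is set to LOG_GROUP_IEM further down in this file. The format
 * strings and variable names are made up for the example.
 *
 *      Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));               // level 1: exceptions/interrupts
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%08llx\n", uCs, uRip));      // flow: enter/exit state info
 *      Log4(("decode - %04x:%08llx: add eax, ebx\n", uCs, uRip));     // level 4: decoded mnemonic w/ EIP
 *      Log8(("IEM WR %RGv LB %u\n", GCPtrDst, cbWrite));              // level 8: memory writes
 *
 * Which of these actually produce output is selected at run time via the VBox log settings
 * for the "iem" group (e.g. something like VBOX_LOG=+iem.e.l.f.l4 - assumption, check the
 * logging docs for the exact syntax).
 */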
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
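/* Illustrative sketch (not part of the original file): what a decoder function built with
 * the macros above looks like and how it is invoked. The handler name is made up; real
 * handlers live elsewhere in IEM.
 *
 *      FNIEMOP_DEF(iemOp_example_nop)
 *      {
 *          // decode any prefixes/operands here, then perform the operation ...
 *          return VINF_SUCCESS;
 *      }
 *
 * A dispatcher then calls it through the matching FNIEMOP_CALL macro, e.g.:
 *
 *      return FNIEMOP_CALL(g_apfnOneByteMap[b]);   // b = the opcode byte just fetched
 *
 * Keeping definitions and call sites behind these macros means the calling convention
 * (e.g. __fastcall on 32-bit hosts) and the nothrow attributes live in exactly one place.
 */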
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218
219/*********************************************************************************************************************************
220* Defined Constants And Macros *
221*********************************************************************************************************************************/
222/** @def IEM_WITH_SETJMP
223 * Enables alternative status code handling using setjmps.
224 *
225 * This adds a bit of expense via the setjmp() call since it saves all the
226 * non-volatile registers. However, it eliminates return code checks and allows
227 * for more optimal return value passing (return regs instead of stack buffer).
228 */
229#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
230# define IEM_WITH_SETJMP
231#endif
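/* Illustrative sketch of the two status-handling styles this switch selects between. The
 * *Jmp fetch variant shown below is assumed from the naming pattern used elsewhere in IEM
 * and may differ in detail:
 *
 *  #ifndef IEM_WITH_SETJMP
 *      uint32_t     u32Val;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                        // every caller has to propagate the status
 *  #else
 *      uint32_t u32Val = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 *      // errors longjmp straight back to the dispatcher, no per-call status checks needed
 *  #endif
 */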
232
233/** Temporary hack to disable the double execution. Will be removed in favor
234 * of a dedicated execution mode in EM. */
235//#define IEM_VERIFICATION_MODE_NO_REM
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
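/* Example usage (illustrative): the macro expands to a complete 'default:' label, so it is
 * dropped straight into a switch over an enum the compiler cannot prove exhaustive:
 *
 *      uint8_t cbValue;
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */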
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
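/* Example usage (illustrative, the message is made up): an instruction stub bailing out on
 * an unimplemented corner case would do
 *
 *      IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Unsupported operand size %d\n", pVCpu->iem.s.enmEffOpSize));
 *
 * which returns VERR_IEM_ASPECT_NOT_IMPLEMENTED so the caller can fall back to a different
 * execution path instead of guessing.
 */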
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/** @def IEM_USE_UNALIGNED_DATA_ACCESS
363 * Use unaligned accesses instead of elaborate byte assembly. */
364#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
365# define IEM_USE_UNALIGNED_DATA_ACCESS
366#endif
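/* Illustrative sketch of what the define above buys in the fetch/store helpers later in
 * this file (the exact helper code may differ):
 *
 *  # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *      *pu32Dst = *(uint32_t const *)&pbBuf[offBuf];           // x86/amd64 handle unaligned loads fine
 *  # else
 *      *pu32Dst = RT_MAKE_U32_FROM_U8(pbBuf[offBuf], pbBuf[offBuf + 1],
 *                                     pbBuf[offBuf + 2], pbBuf[offBuf + 3]);
 *  # endif
 */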
367
368#ifdef VBOX_WITH_NESTED_HWVIRT
369/**
370 * Check the common SVM instruction preconditions.
371 */
372#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
373 do { \
374 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
375 { \
376 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
377 return iemRaiseUndefinedOpcode(pVCpu); \
378 } \
379 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
380 { \
381 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
382 return iemRaiseUndefinedOpcode(pVCpu); \
383 } \
384 if (pVCpu->iem.s.uCpl != 0) \
385 { \
386 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
387 return iemRaiseGeneralProtectionFault0(pVCpu); \
388 } \
389 } while (0)
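/* Illustrative usage (sketch): each nested-SVM instruction body starts with something like
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
 *
 * so the EFER.SVME, real/v8086-mode and CPL checks are written once and shared by
 * vmrun/vmload/vmsave and friends.
 */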
390
391/**
392 * Check if SVM is enabled.
393 */
394#define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if an SVM control/instruction intercept is set.
398 */
399#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
400
401/**
402 * Check if an SVM read CRx intercept is set.
403 */
404#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
405
406/**
407 * Check if an SVM write CRx intercept is set.
408 */
409#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
410
411/**
412 * Check if an SVM read DRx intercept is set.
413 */
414#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
415
416/**
417 * Check if an SVM write DRx intercept is set.
418 */
419#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
420
421/**
422 * Check if an SVM exception intercept is set.
423 */
424#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
425#endif /* VBOX_WITH_NESTED_HWVIRT */
426
427
428/*********************************************************************************************************************************
429* Global Variables *
430*********************************************************************************************************************************/
431extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
432
433
434/** Function table for the ADD instruction. */
435IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
436{
437 iemAImpl_add_u8, iemAImpl_add_u8_locked,
438 iemAImpl_add_u16, iemAImpl_add_u16_locked,
439 iemAImpl_add_u32, iemAImpl_add_u32_locked,
440 iemAImpl_add_u64, iemAImpl_add_u64_locked
441};
442
443/** Function table for the ADC instruction. */
444IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
445{
446 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
447 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
448 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
449 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
450};
451
452/** Function table for the SUB instruction. */
453IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
454{
455 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
456 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
457 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
458 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
459};
460
461/** Function table for the SBB instruction. */
462IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
463{
464 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
465 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
466 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
467 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
468};
469
470/** Function table for the OR instruction. */
471IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
472{
473 iemAImpl_or_u8, iemAImpl_or_u8_locked,
474 iemAImpl_or_u16, iemAImpl_or_u16_locked,
475 iemAImpl_or_u32, iemAImpl_or_u32_locked,
476 iemAImpl_or_u64, iemAImpl_or_u64_locked
477};
478
479/** Function table for the XOR instruction. */
480IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
481{
482 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
483 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
484 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
485 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
486};
487
488/** Function table for the AND instruction. */
489IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
490{
491 iemAImpl_and_u8, iemAImpl_and_u8_locked,
492 iemAImpl_and_u16, iemAImpl_and_u16_locked,
493 iemAImpl_and_u32, iemAImpl_and_u32_locked,
494 iemAImpl_and_u64, iemAImpl_and_u64_locked
495};
496
497/** Function table for the CMP instruction.
498 * @remarks Making operand order ASSUMPTIONS.
499 */
500IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
501{
502 iemAImpl_cmp_u8, NULL,
503 iemAImpl_cmp_u16, NULL,
504 iemAImpl_cmp_u32, NULL,
505 iemAImpl_cmp_u64, NULL
506};
507
508/** Function table for the TEST instruction.
509 * @remarks Making operand order ASSUMPTIONS.
510 */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
512{
513 iemAImpl_test_u8, NULL,
514 iemAImpl_test_u16, NULL,
515 iemAImpl_test_u32, NULL,
516 iemAImpl_test_u64, NULL
517};
518
519/** Function table for the BT instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
521{
522 NULL, NULL,
523 iemAImpl_bt_u16, NULL,
524 iemAImpl_bt_u32, NULL,
525 iemAImpl_bt_u64, NULL
526};
527
528/** Function table for the BTC instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
530{
531 NULL, NULL,
532 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
533 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
534 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
535};
536
537/** Function table for the BTR instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
539{
540 NULL, NULL,
541 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
542 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
543 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
544};
545
546/** Function table for the BTS instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
548{
549 NULL, NULL,
550 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
551 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
552 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
553};
554
555/** Function table for the BSF instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
557{
558 NULL, NULL,
559 iemAImpl_bsf_u16, NULL,
560 iemAImpl_bsf_u32, NULL,
561 iemAImpl_bsf_u64, NULL
562};
563
564/** Function table for the BSR instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
566{
567 NULL, NULL,
568 iemAImpl_bsr_u16, NULL,
569 iemAImpl_bsr_u32, NULL,
570 iemAImpl_bsr_u64, NULL
571};
572
573/** Function table for the IMUL instruction. */
574IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
575{
576 NULL, NULL,
577 iemAImpl_imul_two_u16, NULL,
578 iemAImpl_imul_two_u32, NULL,
579 iemAImpl_imul_two_u64, NULL
580};
581
582/** Group 1 /r lookup table. */
583IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
584{
585 &g_iemAImpl_add,
586 &g_iemAImpl_or,
587 &g_iemAImpl_adc,
588 &g_iemAImpl_sbb,
589 &g_iemAImpl_and,
590 &g_iemAImpl_sub,
591 &g_iemAImpl_xor,
592 &g_iemAImpl_cmp
593};
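/* Illustrative: the Group 1 decoders (opcodes 0x80..0x83) select their worker through this
 * table using the /r field of the ModR/M byte, roughly:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *
 * i.e. /0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB, /6=XOR, /7=CMP, matching the order above.
 */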
594
595/** Function table for the INC instruction. */
596IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
597{
598 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
599 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
600 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
601 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
602};
603
604/** Function table for the DEC instruction. */
605IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
606{
607 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
608 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
609 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
610 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
611};
612
613/** Function table for the NEG instruction. */
614IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
615{
616 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
617 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
618 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
619 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
620};
621
622/** Function table for the NOT instruction. */
623IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
624{
625 iemAImpl_not_u8, iemAImpl_not_u8_locked,
626 iemAImpl_not_u16, iemAImpl_not_u16_locked,
627 iemAImpl_not_u32, iemAImpl_not_u32_locked,
628 iemAImpl_not_u64, iemAImpl_not_u64_locked
629};
630
631
632/** Function table for the ROL instruction. */
633IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
634{
635 iemAImpl_rol_u8,
636 iemAImpl_rol_u16,
637 iemAImpl_rol_u32,
638 iemAImpl_rol_u64
639};
640
641/** Function table for the ROR instruction. */
642IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
643{
644 iemAImpl_ror_u8,
645 iemAImpl_ror_u16,
646 iemAImpl_ror_u32,
647 iemAImpl_ror_u64
648};
649
650/** Function table for the RCL instruction. */
651IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
652{
653 iemAImpl_rcl_u8,
654 iemAImpl_rcl_u16,
655 iemAImpl_rcl_u32,
656 iemAImpl_rcl_u64
657};
658
659/** Function table for the RCR instruction. */
660IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
661{
662 iemAImpl_rcr_u8,
663 iemAImpl_rcr_u16,
664 iemAImpl_rcr_u32,
665 iemAImpl_rcr_u64
666};
667
668/** Function table for the SHL instruction. */
669IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
670{
671 iemAImpl_shl_u8,
672 iemAImpl_shl_u16,
673 iemAImpl_shl_u32,
674 iemAImpl_shl_u64
675};
676
677/** Function table for the SHR instruction. */
678IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
679{
680 iemAImpl_shr_u8,
681 iemAImpl_shr_u16,
682 iemAImpl_shr_u32,
683 iemAImpl_shr_u64
684};
685
686/** Function table for the SAR instruction. */
687IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
688{
689 iemAImpl_sar_u8,
690 iemAImpl_sar_u16,
691 iemAImpl_sar_u32,
692 iemAImpl_sar_u64
693};
694
695
696/** Function table for the MUL instruction. */
697IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
698{
699 iemAImpl_mul_u8,
700 iemAImpl_mul_u16,
701 iemAImpl_mul_u32,
702 iemAImpl_mul_u64
703};
704
705/** Function table for the IMUL instruction working implicitly on rAX. */
706IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
707{
708 iemAImpl_imul_u8,
709 iemAImpl_imul_u16,
710 iemAImpl_imul_u32,
711 iemAImpl_imul_u64
712};
713
714/** Function table for the DIV instruction. */
715IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
716{
717 iemAImpl_div_u8,
718 iemAImpl_div_u16,
719 iemAImpl_div_u32,
720 iemAImpl_div_u64
721};
722
723/** Function table for the IDIV instruction. */
724IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
725{
726 iemAImpl_idiv_u8,
727 iemAImpl_idiv_u16,
728 iemAImpl_idiv_u32,
729 iemAImpl_idiv_u64
730};
731
732/** Function table for the SHLD instruction */
733IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
734{
735 iemAImpl_shld_u16,
736 iemAImpl_shld_u32,
737 iemAImpl_shld_u64,
738};
739
740/** Function table for the SHRD instruction */
741IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
742{
743 iemAImpl_shrd_u16,
744 iemAImpl_shrd_u32,
745 iemAImpl_shrd_u64,
746};
747
748
749/** Function table for the PUNPCKLBW instruction */
750IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
751/** Function table for the PUNPCKLWD instruction */
752IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
753/** Function table for the PUNPCKLDQ instruction */
754IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
755/** Function table for the PUNPCKLQDQ instruction */
756IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
757
758/** Function table for the PUNPCKHBW instruction */
759IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
760/** Function table for the PUNPCKHWD instruction */
761IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
762/** Function table for the PUNPCKHDQ instruction */
763IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
764/** Function table for the PUNPCKHQDQ instruction */
765IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
766
767/** Function table for the PXOR instruction */
768IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
769/** Function table for the PCMPEQB instruction */
770IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
771/** Function table for the PCMPEQW instruction */
772IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
773/** Function table for the PCMPEQD instruction */
774IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
775
776
777#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
778/** What IEM just wrote. */
779uint8_t g_abIemWrote[256];
780/** How much IEM just wrote. */
781size_t g_cbIemWrote;
782#endif
783
784
785/*********************************************************************************************************************************
786* Internal Functions *
787*********************************************************************************************************************************/
788IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
789IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
790IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
791IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
792/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
793IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
794IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
795IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
796IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
797IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
798IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
799IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
800IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
801IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
802IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
803IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
804IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
805#ifdef IEM_WITH_SETJMP
806DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
807DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
808DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
809DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
810DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
811#endif
812
813IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
814IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
815IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
816IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
817IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
818IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
820IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
821IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
822IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
823IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
824IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
825IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
826IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
827IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
828IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
829
830#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
831IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
832#endif
833IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
834IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
835
836
837
838/**
839 * Sets the pass up status.
840 *
841 * @returns VINF_SUCCESS.
842 * @param pVCpu The cross context virtual CPU structure of the
843 * calling thread.
844 * @param rcPassUp The pass up status. Must be informational.
845 * VINF_SUCCESS is not allowed.
846 */
847IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
848{
849 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
850
851 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
852 if (rcOldPassUp == VINF_SUCCESS)
853 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
854 /* If both are EM scheduling codes, use EM priority rules. */
855 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
856 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
857 {
858 if (rcPassUp < rcOldPassUp)
859 {
860 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
861 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
862 }
863 else
864 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
865 }
866 /* Override EM scheduling with specific status code. */
867 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
868 {
869 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
870 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
871 }
872 /* Don't override specific status code, first come first served. */
873 else
874 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
875 return VINF_SUCCESS;
876}
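/* Illustrative usage (mirrors the opcode prefetch code further down): informational statuses
 * from PGM are not returned directly but folded into the pass-up status so the instruction
 * can complete first:
 *
 *      else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);   // remember it, keep executing
 */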
877
878
879/**
880 * Calculates the CPU mode.
881 *
882 * This is mainly for updating IEMCPU::enmCpuMode.
883 *
884 * @returns CPU mode.
885 * @param pCtx The register context for the CPU.
886 */
887DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
888{
889 if (CPUMIsGuestIn64BitCodeEx(pCtx))
890 return IEMMODE_64BIT;
891 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
892 return IEMMODE_32BIT;
893 return IEMMODE_16BIT;
894}
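/* Illustrative (hypothetical caller): whenever CS or EFER changes in a way that can switch
 * modes, the cached value is refreshed with
 *
 *      pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(IEM_GET_CTX(pVCpu));
 *
 * exactly as iemInitExec()/iemInitDecoder() below do when (re)entering IEM.
 */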
895
896
897/**
898 * Initializes the execution state.
899 *
900 * @param pVCpu The cross context virtual CPU structure of the
901 * calling thread.
902 * @param fBypassHandlers Whether to bypass access handlers.
903 *
904 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
905 * side-effects in strict builds.
906 */
907DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
908{
909 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
910
911 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
912
913#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
915 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
917 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
918 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
920 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
921 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
922#endif
923
924#ifdef VBOX_WITH_RAW_MODE_NOT_R0
925 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
926#endif
927 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
928 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
929#ifdef VBOX_STRICT
930 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
931 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
932 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
933 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
934 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
935 pVCpu->iem.s.uRexReg = 127;
936 pVCpu->iem.s.uRexB = 127;
937 pVCpu->iem.s.uRexIndex = 127;
938 pVCpu->iem.s.iEffSeg = 127;
939 pVCpu->iem.s.idxPrefix = 127;
940 pVCpu->iem.s.uVex3rdReg = 127;
941 pVCpu->iem.s.uVexLength = 127;
942 pVCpu->iem.s.fEvexStuff = 127;
943 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
944# ifdef IEM_WITH_CODE_TLB
945 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
946 pVCpu->iem.s.pbInstrBuf = NULL;
947 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
948 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
949 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
950 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
951# else
952 pVCpu->iem.s.offOpcode = 127;
953 pVCpu->iem.s.cbOpcode = 127;
954# endif
955#endif
956
957 pVCpu->iem.s.cActiveMappings = 0;
958 pVCpu->iem.s.iNextMapping = 0;
959 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
960 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
963 && pCtx->cs.u64Base == 0
964 && pCtx->cs.u32Limit == UINT32_MAX
965 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
966 if (!pVCpu->iem.s.fInPatchCode)
967 CPUMRawLeave(pVCpu, VINF_SUCCESS);
968#endif
969
970#ifdef IEM_VERIFICATION_MODE_FULL
971 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
972 pVCpu->iem.s.fNoRem = true;
973#endif
974}
975
976
977/**
978 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
979 *
980 * @param pVCpu The cross context virtual CPU structure of the
981 * calling thread.
982 */
983DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
984{
985 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
986#ifdef IEM_VERIFICATION_MODE_FULL
987 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
988#endif
989#ifdef VBOX_STRICT
990# ifdef IEM_WITH_CODE_TLB
991 NOREF(pVCpu);
992# else
993 pVCpu->iem.s.cbOpcode = 0;
994# endif
995#else
996 NOREF(pVCpu);
997#endif
998}
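/* Illustrative pairing (sketch, simplified from the real IEMExecOne-style paths):
 *
 *      iemInitExec(pVCpu, false);                                       // fBypassHandlers = false
 *      VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
 *      // ... decode and execute one instruction when rcStrict == VINF_SUCCESS ...
 *      iemUninitExec(pVCpu);
 *
 * so the strict-build poison values set in iemInitExec() never leak into the next run.
 */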
999
1000
1001/**
1002 * Initializes the decoder state.
1003 *
1004 * iemReInitDecoder is mostly a copy of this function.
1005 *
1006 * @param pVCpu The cross context virtual CPU structure of the
1007 * calling thread.
1008 * @param fBypassHandlers Whether to bypass access handlers.
1009 */
1010DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1011{
1012 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1013
1014 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1015
1016#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1025#endif
1026
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1029#endif
1030 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1031#ifdef IEM_VERIFICATION_MODE_FULL
1032 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1033 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1034#endif
1035 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1036 pVCpu->iem.s.enmCpuMode = enmMode;
1037 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1038 pVCpu->iem.s.enmEffAddrMode = enmMode;
1039 if (enmMode != IEMMODE_64BIT)
1040 {
1041 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1042 pVCpu->iem.s.enmEffOpSize = enmMode;
1043 }
1044 else
1045 {
1046 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1047 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1048 }
1049 pVCpu->iem.s.fPrefixes = 0;
1050 pVCpu->iem.s.uRexReg = 0;
1051 pVCpu->iem.s.uRexB = 0;
1052 pVCpu->iem.s.uRexIndex = 0;
1053 pVCpu->iem.s.idxPrefix = 0;
1054 pVCpu->iem.s.uVex3rdReg = 0;
1055 pVCpu->iem.s.uVexLength = 0;
1056 pVCpu->iem.s.fEvexStuff = 0;
1057 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1058#ifdef IEM_WITH_CODE_TLB
1059 pVCpu->iem.s.pbInstrBuf = NULL;
1060 pVCpu->iem.s.offInstrNextByte = 0;
1061 pVCpu->iem.s.offCurInstrStart = 0;
1062# ifdef VBOX_STRICT
1063 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1064 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1065 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1066# endif
1067#else
1068 pVCpu->iem.s.offOpcode = 0;
1069 pVCpu->iem.s.cbOpcode = 0;
1070#endif
1071 pVCpu->iem.s.cActiveMappings = 0;
1072 pVCpu->iem.s.iNextMapping = 0;
1073 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1074 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1076 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1077 && pCtx->cs.u64Base == 0
1078 && pCtx->cs.u32Limit == UINT32_MAX
1079 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1080 if (!pVCpu->iem.s.fInPatchCode)
1081 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1082#endif
1083
1084#ifdef DBGFTRACE_ENABLED
1085 switch (enmMode)
1086 {
1087 case IEMMODE_64BIT:
1088 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1089 break;
1090 case IEMMODE_32BIT:
1091 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1092 break;
1093 case IEMMODE_16BIT:
1094 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1095 break;
1096 }
1097#endif
1098}
1099
1100
1101/**
1102 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1103 *
1104 * This is mostly a copy of iemInitDecoder.
1105 *
1106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1107 */
1108DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1109{
1110 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1111
1112 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1113
1114#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1123#endif
1124
1125 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1126#ifdef IEM_VERIFICATION_MODE_FULL
1127 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1128 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1129#endif
1130 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1131 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1132 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1133 pVCpu->iem.s.enmEffAddrMode = enmMode;
1134 if (enmMode != IEMMODE_64BIT)
1135 {
1136 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1137 pVCpu->iem.s.enmEffOpSize = enmMode;
1138 }
1139 else
1140 {
1141 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1142 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1143 }
1144 pVCpu->iem.s.fPrefixes = 0;
1145 pVCpu->iem.s.uRexReg = 0;
1146 pVCpu->iem.s.uRexB = 0;
1147 pVCpu->iem.s.uRexIndex = 0;
1148 pVCpu->iem.s.idxPrefix = 0;
1149 pVCpu->iem.s.uVex3rdReg = 0;
1150 pVCpu->iem.s.uVexLength = 0;
1151 pVCpu->iem.s.fEvexStuff = 0;
1152 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1153#ifdef IEM_WITH_CODE_TLB
1154 if (pVCpu->iem.s.pbInstrBuf)
1155 {
1156 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1157 - pVCpu->iem.s.uInstrBufPc;
1158 if (off < pVCpu->iem.s.cbInstrBufTotal)
1159 {
1160 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1161 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1162 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1163 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1164 else
1165 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1166 }
1167 else
1168 {
1169 pVCpu->iem.s.pbInstrBuf = NULL;
1170 pVCpu->iem.s.offInstrNextByte = 0;
1171 pVCpu->iem.s.offCurInstrStart = 0;
1172 pVCpu->iem.s.cbInstrBuf = 0;
1173 pVCpu->iem.s.cbInstrBufTotal = 0;
1174 }
1175 }
1176 else
1177 {
1178 pVCpu->iem.s.offInstrNextByte = 0;
1179 pVCpu->iem.s.offCurInstrStart = 0;
1180 pVCpu->iem.s.cbInstrBuf = 0;
1181 pVCpu->iem.s.cbInstrBufTotal = 0;
1182 }
1183#else
1184 pVCpu->iem.s.cbOpcode = 0;
1185 pVCpu->iem.s.offOpcode = 0;
1186#endif
1187 Assert(pVCpu->iem.s.cActiveMappings == 0);
1188 pVCpu->iem.s.iNextMapping = 0;
1189 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1190 Assert(pVCpu->iem.s.fBypassHandlers == false);
1191#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1192 if (!pVCpu->iem.s.fInPatchCode)
1193 { /* likely */ }
1194 else
1195 {
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202 }
1203#endif
1204
1205#ifdef DBGFTRACE_ENABLED
1206 switch (enmMode)
1207 {
1208 case IEMMODE_64BIT:
1209 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1210 break;
1211 case IEMMODE_32BIT:
1212 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1213 break;
1214 case IEMMODE_16BIT:
1215 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1216 break;
1217 }
1218#endif
1219}
1220
1221
1222
1223/**
1224 * Prefetches opcodes the first time execution is started.
1225 *
1226 * @returns Strict VBox status code.
1227 * @param pVCpu The cross context virtual CPU structure of the
1228 * calling thread.
1229 * @param fBypassHandlers Whether to bypass access handlers.
1230 */
1231IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1232{
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1235#endif
1236 iemInitDecoder(pVCpu, fBypassHandlers);
1237
1238#ifdef IEM_WITH_CODE_TLB
1239 /** @todo Do ITLB lookup here. */
1240
1241#else /* !IEM_WITH_CODE_TLB */
1242
1243 /*
1244 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1245 *
1246 * First translate CS:rIP to a physical address.
1247 */
1248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1249 uint32_t cbToTryRead;
1250 RTGCPTR GCPtrPC;
1251 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1252 {
1253 cbToTryRead = PAGE_SIZE;
1254 GCPtrPC = pCtx->rip;
1255 if (IEM_IS_CANONICAL(GCPtrPC))
1256 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1257 else
1258 return iemRaiseGeneralProtectionFault0(pVCpu);
1259 }
1260 else
1261 {
1262 uint32_t GCPtrPC32 = pCtx->eip;
1263 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1264 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1265 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1266 else
1267 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1268 if (cbToTryRead) { /* likely */ }
1269 else /* overflowed */
1270 {
1271 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1272 cbToTryRead = UINT32_MAX;
1273 }
1274 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1275 Assert(GCPtrPC <= UINT32_MAX);
1276 }
1277
1278# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1279 /* Allow interpretation of patch manager code blocks since they can for
1280 instance throw #PFs for perfectly good reasons. */
1281 if (pVCpu->iem.s.fInPatchCode)
1282 {
1283 size_t cbRead = 0;
1284 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1285 AssertRCReturn(rc, rc);
1286 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1287 return VINF_SUCCESS;
1288 }
1289# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1290
1291 RTGCPHYS GCPhys;
1292 uint64_t fFlags;
1293 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1294 if (RT_SUCCESS(rc)) { /* probable */ }
1295 else
1296 {
1297 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1298 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1299 }
1300 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1301 else
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1304 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1305 }
1306 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1307 else
1308 {
1309 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1310 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1311 }
1312 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1313 /** @todo Check reserved bits and such stuff. PGM is better at doing
1314 * that, so do it when implementing the guest virtual address
1315 * TLB... */
1316
1317# ifdef IEM_VERIFICATION_MODE_FULL
1318 /*
1319 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1320 * instruction.
1321 */
1322 /** @todo optimize this differently by not using PGMPhysRead. */
1323 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1324 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1325 if ( offPrevOpcodes < cbOldOpcodes
1326 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1327 {
1328 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1329 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1330 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1331 pVCpu->iem.s.cbOpcode = cbNew;
1332 return VINF_SUCCESS;
1333 }
1334# endif
1335
1336 /*
1337 * Read the bytes at this address.
1338 */
1339 PVM pVM = pVCpu->CTX_SUFF(pVM);
1340# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1341 size_t cbActual;
1342 if ( PATMIsEnabled(pVM)
1343 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1344 {
1345 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1346 Assert(cbActual > 0);
1347 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1348 }
1349 else
1350# endif
1351 {
1352 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1353 if (cbToTryRead > cbLeftOnPage)
1354 cbToTryRead = cbLeftOnPage;
1355 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1356 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1357
1358 if (!pVCpu->iem.s.fBypassHandlers)
1359 {
1360 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1361 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1362 { /* likely */ }
1363 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1364 {
1365 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1366 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1367 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1368 }
1369 else
1370 {
1371 Log((RT_SUCCESS(rcStrict)
1372 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1373 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1374 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1375 return rcStrict;
1376 }
1377 }
1378 else
1379 {
1380 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1381 if (RT_SUCCESS(rc))
1382 { /* likely */ }
1383 else
1384 {
1385 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1386 GCPtrPC, GCPhys, cbToTryRead, rc));
1387 return rc;
1388 }
1389 }
1390 pVCpu->iem.s.cbOpcode = cbToTryRead;
1391 }
1392#endif /* !IEM_WITH_CODE_TLB */
1393 return VINF_SUCCESS;
1394}
1395
1396
1397/**
1398 * Invalidates the IEM TLBs.
1399 *
1400 * This is called internally as well as by PGM when moving GC mappings.
1401 *
1403 * @param pVCpu The cross context virtual CPU structure of the calling
1404 * thread.
1405 * @param fVmm Set when PGM calls us with a remapping.
1406 */
1407VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1408{
1409#ifdef IEM_WITH_CODE_TLB
1410 pVCpu->iem.s.cbInstrBufTotal = 0;
1411 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1412 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1413 { /* very likely */ }
1414 else
1415 {
1416 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1417 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1418 while (i-- > 0)
1419 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1420 }
1421#endif
1422
1423#ifdef IEM_WITH_DATA_TLB
1424 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1425 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1426 { /* very likely */ }
1427 else
1428 {
1429 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1430 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1431 while (i-- > 0)
1432 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1433 }
1434#endif
1435 NOREF(pVCpu); NOREF(fVmm);
1436}
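/* Illustrative sketch of the tagging scheme the revision bump above relies on (entry layout
 * per IEMInternal.h, the actual lookup lives with the TLB-enabled fetch paths):
 *
 *      uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
 *      IEMTLBENTRY   *pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)(GCPtr >> X86_PAGE_SHIFT)];
 *      if (pTlbe->uTag == uTag)
 *          ...                                                          // TLB hit
 *
 * Because live tags always contain the current revision, incrementing uTlbRevision makes every
 * existing tag mismatch, invalidating all 256 entries in O(1); only on the rare revision
 * wrap-around does the code above fall back to clearing the whole array.
 */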
1437
1438
1439/**
1440 * Invalidates a page in the TLBs.
1441 *
1442 * @param pVCpu The cross context virtual CPU structure of the calling
1443 * thread.
1444 * @param GCPtr The address of the page to invalidate
1445 */
1446VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1447{
1448#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1449 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1450 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1451 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1452 uintptr_t idx = (uint8_t)GCPtr;
1453
1454# ifdef IEM_WITH_CODE_TLB
1455 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1456 {
1457 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1458 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1459 pVCpu->iem.s.cbInstrBufTotal = 0;
1460 }
1461# endif
1462
1463# ifdef IEM_WITH_DATA_TLB
1464 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1465 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1466# endif
1467#else
1468 NOREF(pVCpu); NOREF(GCPtr);
1469#endif
1470}
1471
1472
1473/**
1474 * Invalidates the host physical aspects of the IEM TLBs.
1475 *
1476 * This is called internally as well as by PGM when moving GC mappings.
1477 *
1478 * @param pVCpu The cross context virtual CPU structure of the calling
1479 * thread.
1480 */
1481VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1482{
1483#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1484 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1485
1486# ifdef IEM_WITH_CODE_TLB
1487 pVCpu->iem.s.cbInstrBufTotal = 0;
1488# endif
1489 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1490 if (uTlbPhysRev != 0)
1491 {
1492 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1493 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1494 }
1495 else
1496 {
1497 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1498 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1499
1500 unsigned i;
1501# ifdef IEM_WITH_CODE_TLB
1502 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1503 while (i-- > 0)
1504 {
1505 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1506 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1507 }
1508# endif
1509# ifdef IEM_WITH_DATA_TLB
1510 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1511 while (i-- > 0)
1512 {
1513 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1514 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1515 }
1516# endif
1517 }
1518#else
1519 NOREF(pVCpu);
1520#endif
1521}
1522
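/*
 * Illustrative note (not from the source): a TLB entry is only trusted to
 * carry current physical page info while the revision bits stored in its
 * fFlagsAndPhysRev match the per-TLB uTlbPhysRev, i.e. the lookup code does
 *
 *     if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *         // physical mapping info still valid
 *
 * so bumping uTlbPhysRev above lazily invalidates every entry at once; the
 * explicit sweep is only needed when the revision counter wraps to zero.
 */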
1523
1524/**
1525 * Invalidates the host physical aspects of the IEM TLBs.
1526 *
1527 * This is called internally as well as by PGM when moving GC mappings.
1528 *
1529 * @param pVM The cross context VM structure.
1530 *
1531 * @remarks Caller holds the PGM lock.
1532 */
1533VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1534{
1535 RT_NOREF_PV(pVM);
1536}
1537
1538#ifdef IEM_WITH_CODE_TLB
1539
1540/**
1541 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1542 * longjmp'ing on failure.
1543 *
1544 * We end up here for a number of reasons:
1545 * - pbInstrBuf isn't yet initialized.
1546 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1547 * - Advancing beyond the CS segment limit.
1548 * - Fetching from a non-mappable page (e.g. MMIO).
1549 *
1550 * @param pVCpu The cross context virtual CPU structure of the
1551 * calling thread.
1552 * @param cbDst Number of bytes to read.
1553 * @param pvDst Where to return the bytes.
1554 *
1555 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1556 */
1557IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1558{
1559#ifdef IN_RING3
1560//__debugbreak();
1561 for (;;)
1562 {
1563 Assert(cbDst <= 8);
1564 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1565
1566 /*
1567 * We might have a partial buffer match, deal with that first to make the
1568 * rest simpler. This is the first part of the cross page/buffer case.
1569 */
1570 if (pVCpu->iem.s.pbInstrBuf != NULL)
1571 {
1572 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1573 {
1574 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1575 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1576 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1577
1578 cbDst -= cbCopy;
1579 pvDst = (uint8_t *)pvDst + cbCopy;
1580 offBuf += cbCopy;
1581 pVCpu->iem.s.offInstrNextByte += cbCopy; /* advance by the bytes copied, not by the absolute offset */
1582 }
1583 }
1584
1585 /*
1586 * Check segment limit, figuring how much we're allowed to access at this point.
1587 *
1588 * We will fault immediately if RIP is past the segment limit / in non-canonical
1589 * territory. If we do continue, there are one or more bytes to read before we
1590 * end up in trouble and we need to do that first before faulting.
1591 */
1592 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1593 RTGCPTR GCPtrFirst;
1594 uint32_t cbMaxRead;
1595 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1596 {
1597 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1598 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1599 { /* likely */ }
1600 else
1601 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1602 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1603 }
1604 else
1605 {
1606 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1607 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1608 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1609 { /* likely */ }
1610 else
1611 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1612 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1613 if (cbMaxRead != 0)
1614 { /* likely */ }
1615 else
1616 {
1617 /* Overflowed because address is 0 and limit is max. */
1618 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1619 cbMaxRead = X86_PAGE_SIZE;
1620 }
1621 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1622 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1623 if (cbMaxRead2 < cbMaxRead)
1624 cbMaxRead = cbMaxRead2;
1625 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1626 }
1627
1628 /*
1629 * Get the TLB entry for this piece of code.
1630 */
1631 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1632 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1633 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1634 if (pTlbe->uTag == uTag)
1635 {
1636 /* likely when executing lots of code, otherwise unlikely */
1637# ifdef VBOX_WITH_STATISTICS
1638 pVCpu->iem.s.CodeTlb.cTlbHits++;
1639# endif
1640 }
1641 else
1642 {
1643 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1644# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1645 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1646 {
1647 pTlbe->uTag = uTag;
1648 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1649 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1650 pTlbe->GCPhys = NIL_RTGCPHYS;
1651 pTlbe->pbMappingR3 = NULL;
1652 }
1653 else
1654# endif
1655 {
1656 RTGCPHYS GCPhys;
1657 uint64_t fFlags;
1658 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1659 if (RT_FAILURE(rc))
1660 {
1661 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1662 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1663 }
1664
1665 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1666 pTlbe->uTag = uTag;
1667 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1668 pTlbe->GCPhys = GCPhys;
1669 pTlbe->pbMappingR3 = NULL;
1670 }
1671 }
1672
1673 /*
1674 * Check TLB page table level access flags.
1675 */
1676 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1677 {
1678 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1679 {
1680 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1681 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1682 }
1683 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1684 {
1685 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1686 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1687 }
1688 }
1689
1690# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1691 /*
1692 * Allow interpretation of patch manager code blocks since they can for
1693 * instance throw #PFs for perfectly good reasons.
1694 */
1695 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1696 { /* likely: not patch code */ }
1697 else
1698 {
1699 /** @todo This could be optimized a little in ring-3 if we liked. */
1700 size_t cbRead = 0;
1701 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1702 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1703 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1704 return;
1705 }
1706# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1707
1708 /*
1709 * Look up the physical page info if necessary.
1710 */
1711 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1712 { /* not necessary */ }
1713 else
1714 {
1715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1716 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1717 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1721 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1722 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1723 }
1724
1725# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1726 /*
1727 * Try do a direct read using the pbMappingR3 pointer.
1728 */
1729 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1730 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1731 {
1732 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1733 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1734 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1735 {
1736 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1737 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1738 }
1739 else
1740 {
1741 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1742 Assert(cbInstr < cbMaxRead);
1743 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1744 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1745 }
1746 if (cbDst <= cbMaxRead)
1747 {
1748 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1749 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1750 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1751 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1752 return;
1753 }
1754 pVCpu->iem.s.pbInstrBuf = NULL;
1755
1756 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1757 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1758 }
1759 else
1760# endif
1761#if 0
1762 /*
1763 * If there is no special read handling, we can read a bit more and
1764 * put it in the prefetch buffer.
1765 */
1766 if ( cbDst < cbMaxRead
1767 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1768 {
1769 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1770 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1772 { /* likely */ }
1773 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1776 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1777 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1778 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1779 }
1780 else
1781 {
1782 Log((RT_SUCCESS(rcStrict)
1783 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1784 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1785 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1786 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1787 }
1788 }
1789 /*
1790 * Special read handling, so only read exactly what's needed.
1791 * This is a highly unlikely scenario.
1792 */
1793 else
1794#endif
1795 {
1796 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1797 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1798 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1799 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1801 { /* likely */ }
1802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1803 {
1804 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1805 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1806 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1807 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1808 }
1809 else
1810 {
1811 Log((RT_SUCCESS(rcStrict)
1812 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1813 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1814 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1815 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1816 }
1817 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1818 if (cbToRead == cbDst)
1819 return;
1820 }
1821
1822 /*
1823 * More to read, loop.
1824 */
1825 cbDst -= cbMaxRead;
1826 pvDst = (uint8_t *)pvDst + cbMaxRead;
1827 }
1828#else
1829 RT_NOREF(pvDst, cbDst);
1830 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1831#endif
1832}
1833
1834#else
1835
1836/**
1837 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1838 * exception if it fails.
1839 *
1840 * @returns Strict VBox status code.
1841 * @param pVCpu The cross context virtual CPU structure of the
1842 * calling thread.
1843 * @param cbMin The minimum number of bytes relative to offOpcode
1844 * that must be read.
1845 */
1846IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1847{
1848 /*
1849 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1850 *
1851 * First translate CS:rIP to a physical address.
1852 */
1853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1854 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1855 uint32_t cbToTryRead;
1856 RTGCPTR GCPtrNext;
1857 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1858 {
1859 cbToTryRead = PAGE_SIZE;
1860 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1861 if (!IEM_IS_CANONICAL(GCPtrNext))
1862 return iemRaiseGeneralProtectionFault0(pVCpu);
1863 }
1864 else
1865 {
1866 uint32_t GCPtrNext32 = pCtx->eip;
1867 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1868 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1869 if (GCPtrNext32 > pCtx->cs.u32Limit)
1870 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1871 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1872 if (!cbToTryRead) /* overflowed */
1873 {
1874 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1875 cbToTryRead = UINT32_MAX;
1876 /** @todo check out wrapping around the code segment. */
1877 }
1878 if (cbToTryRead < cbMin - cbLeft)
1879 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1880 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1881 }
1882
1883 /* Only read up to the end of the page, and make sure we don't read more
1884 than the opcode buffer can hold. */
1885 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1886 if (cbToTryRead > cbLeftOnPage)
1887 cbToTryRead = cbLeftOnPage;
1888 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1889 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1890/** @todo r=bird: Convert assertion into undefined opcode exception? */
1891 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1892
1893# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1894 /* Allow interpretation of patch manager code blocks since they can for
1895 instance throw #PFs for perfectly good reasons. */
1896 if (pVCpu->iem.s.fInPatchCode)
1897 {
1898 size_t cbRead = 0;
1899 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1900 AssertRCReturn(rc, rc);
1901 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1902 return VINF_SUCCESS;
1903 }
1904# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1905
1906 RTGCPHYS GCPhys;
1907 uint64_t fFlags;
1908 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1909 if (RT_FAILURE(rc))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1912 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1913 }
1914 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1915 {
1916 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1917 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1918 }
1919 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1920 {
1921 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1922 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1923 }
1924 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1925 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1926 /** @todo Check reserved bits and such stuff. PGM is better at doing
1927 * that, so do it when implementing the guest virtual address
1928 * TLB... */
1929
1930 /*
1931 * Read the bytes at this address.
1932 *
1933 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1934 * and since PATM should only patch the start of an instruction there
1935 * should be no need to check again here.
1936 */
1937 if (!pVCpu->iem.s.fBypassHandlers)
1938 {
1939 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1940 cbToTryRead, PGMACCESSORIGIN_IEM);
1941 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1942 { /* likely */ }
1943 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1944 {
1945 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1946 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1947 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1948 }
1949 else
1950 {
1951 Log((RT_SUCCESS(rcStrict)
1952 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1953 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1954 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1955 return rcStrict;
1956 }
1957 }
1958 else
1959 {
1960 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1961 if (RT_SUCCESS(rc))
1962 { /* likely */ }
1963 else
1964 {
1965 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1966 return rc;
1967 }
1968 }
1969 pVCpu->iem.s.cbOpcode += cbToTryRead;
1970 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1971
1972 return VINF_SUCCESS;
1973}
1974
1975#endif /* !IEM_WITH_CODE_TLB */
1976#ifndef IEM_WITH_SETJMP
1977
1978/**
1979 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1980 *
1981 * @returns Strict VBox status code.
1982 * @param pVCpu The cross context virtual CPU structure of the
1983 * calling thread.
1984 * @param pb Where to return the opcode byte.
1985 */
1986DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1987{
1988 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1989 if (rcStrict == VINF_SUCCESS)
1990 {
1991 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1992 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1993 pVCpu->iem.s.offOpcode = offOpcode + 1;
1994 }
1995 else
1996 *pb = 0;
1997 return rcStrict;
1998}
1999
2000
2001/**
2002 * Fetches the next opcode byte.
2003 *
2004 * @returns Strict VBox status code.
2005 * @param pVCpu The cross context virtual CPU structure of the
2006 * calling thread.
2007 * @param pu8 Where to return the opcode byte.
2008 */
2009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2010{
2011 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2012 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2013 {
2014 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2015 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2016 return VINF_SUCCESS;
2017 }
2018 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2019}
2020
2021#else /* IEM_WITH_SETJMP */
2022
2023/**
2024 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2025 *
2026 * @returns The opcode byte.
2027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2028 */
2029DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2030{
2031# ifdef IEM_WITH_CODE_TLB
2032 uint8_t u8;
2033 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2034 return u8;
2035# else
2036 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2037 if (rcStrict == VINF_SUCCESS)
2038 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2039 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2040# endif
2041}
2042
2043
2044/**
2045 * Fetches the next opcode byte, longjmp on error.
2046 *
2047 * @returns The opcode byte.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 */
2050DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2051{
2052# ifdef IEM_WITH_CODE_TLB
2053 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2054 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2055 if (RT_LIKELY( pbBuf != NULL
2056 && offBuf < pVCpu->iem.s.cbInstrBuf))
2057 {
2058 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2059 return pbBuf[offBuf];
2060 }
2061# else
2062 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2063 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2064 {
2065 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2066 return pVCpu->iem.s.abOpcode[offOpcode];
2067 }
2068# endif
2069 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2070}
2071
2072#endif /* IEM_WITH_SETJMP */
2073
2074/**
2075 * Fetches the next opcode byte, returns automatically on failure.
2076 *
2077 * @param a_pu8 Where to return the opcode byte.
2078 * @remark Implicitly references pVCpu.
2079 */
2080#ifndef IEM_WITH_SETJMP
2081# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2082 do \
2083 { \
2084 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2085 if (rcStrict2 == VINF_SUCCESS) \
2086 { /* likely */ } \
2087 else \
2088 return rcStrict2; \
2089 } while (0)
2090#else
2091# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2092#endif /* IEM_WITH_SETJMP */
2093
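/*
 * Illustrative sketch of how a decoder routine might use the macro (the
 * surrounding function and the local bRm are hypothetical, not part of this
 * file): in the non-setjmp build the macro returns the status code from the
 * caller on failure, in the setjmp build it longjmps, so the caller must
 * either return VBOXSTRICTRC or run under the IEM jump buffer:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);   // e.g. fetch a ModR/M byte
 *     // ... decode bRm ...
 */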
2094
2095#ifndef IEM_WITH_SETJMP
2096/**
2097 * Fetches the next signed byte from the opcode stream.
2098 *
2099 * @returns Strict VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 * @param pi8 Where to return the signed byte.
2102 */
2103DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2104{
2105 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2106}
2107#endif /* !IEM_WITH_SETJMP */
2108
2109
2110/**
2111 * Fetches the next signed byte from the opcode stream, returning automatically
2112 * on failure.
2113 *
2114 * @param a_pi8 Where to return the signed byte.
2115 * @remark Implicitly references pVCpu.
2116 */
2117#ifndef IEM_WITH_SETJMP
2118# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2119 do \
2120 { \
2121 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2122 if (rcStrict2 != VINF_SUCCESS) \
2123 return rcStrict2; \
2124 } while (0)
2125#else /* IEM_WITH_SETJMP */
2126# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2127
2128#endif /* IEM_WITH_SETJMP */
2129
2130#ifndef IEM_WITH_SETJMP
2131
2132/**
2133 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2137 * @param pu16 Where to return the opcode word.
2138 */
2139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2140{
2141 uint8_t u8;
2142 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2143 if (rcStrict == VINF_SUCCESS)
2144 *pu16 = (int8_t)u8;
2145 return rcStrict;
2146}
2147
2148
2149/**
2150 * Fetches the next signed byte from the opcode stream, extending it to
2151 * unsigned 16-bit.
2152 *
2153 * @returns Strict VBox status code.
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pu16 Where to return the unsigned word.
2156 */
2157DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2158{
2159 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2160 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2161 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2162
2163 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2164 pVCpu->iem.s.offOpcode = offOpcode + 1;
2165 return VINF_SUCCESS;
2166}
2167
2168#endif /* !IEM_WITH_SETJMP */
2169
2170/**
2171 * Fetches the next signed byte from the opcode stream, sign-extending it to
2172 * a word and returning automatically on failure.
2173 *
2174 * @param a_pu16 Where to return the word.
2175 * @remark Implicitly references pVCpu.
2176 */
2177#ifndef IEM_WITH_SETJMP
2178# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2179 do \
2180 { \
2181 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2182 if (rcStrict2 != VINF_SUCCESS) \
2183 return rcStrict2; \
2184 } while (0)
2185#else
2186# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2187#endif
2188
2189#ifndef IEM_WITH_SETJMP
2190
2191/**
2192 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2193 *
2194 * @returns Strict VBox status code.
2195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2196 * @param pu32 Where to return the opcode dword.
2197 */
2198DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2199{
2200 uint8_t u8;
2201 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2202 if (rcStrict == VINF_SUCCESS)
2203 *pu32 = (int8_t)u8;
2204 return rcStrict;
2205}
2206
2207
2208/**
2209 * Fetches the next signed byte from the opcode stream, extending it to
2210 * unsigned 32-bit.
2211 *
2212 * @returns Strict VBox status code.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 * @param pu32 Where to return the unsigned dword.
2215 */
2216DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2217{
2218 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2219 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2220 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2221
2222 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2223 pVCpu->iem.s.offOpcode = offOpcode + 1;
2224 return VINF_SUCCESS;
2225}
2226
2227#endif /* !IEM_WITH_SETJMP */
2228
2229/**
2230 * Fetches the next signed byte from the opcode stream, sign-extending it to
2231 * a double word and returning automatically on failure.
2232 *
2233 * @param a_pu32 Where to return the double word.
2234 * @remark Implicitly references pVCpu.
2235 */
2236#ifndef IEM_WITH_SETJMP
2237#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2238 do \
2239 { \
2240 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2241 if (rcStrict2 != VINF_SUCCESS) \
2242 return rcStrict2; \
2243 } while (0)
2244#else
2245# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2246#endif
2247
2248#ifndef IEM_WITH_SETJMP
2249
2250/**
2251 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2252 *
2253 * @returns Strict VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2255 * @param pu64 Where to return the opcode qword.
2256 */
2257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2258{
2259 uint8_t u8;
2260 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2261 if (rcStrict == VINF_SUCCESS)
2262 *pu64 = (int8_t)u8;
2263 return rcStrict;
2264}
2265
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, extending it to
2269 * unsigned 64-bit.
2270 *
2271 * @returns Strict VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2273 * @param pu64 Where to return the unsigned qword.
2274 */
2275DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2276{
2277 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2278 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2279 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2280
2281 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2282 pVCpu->iem.s.offOpcode = offOpcode + 1;
2283 return VINF_SUCCESS;
2284}
2285
2286#endif /* !IEM_WITH_SETJMP */
2287
2288
2289/**
2290 * Fetches the next signed byte from the opcode stream, sign-extending it to
2291 * a quad word and returning automatically on failure.
2292 *
2293 * @param a_pu64 Where to return the quad word.
2294 * @remark Implicitly references pVCpu.
2295 */
2296#ifndef IEM_WITH_SETJMP
2297# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2298 do \
2299 { \
2300 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2301 if (rcStrict2 != VINF_SUCCESS) \
2302 return rcStrict2; \
2303 } while (0)
2304#else
2305# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2306#endif
2307
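/*
 * Illustrative sketch (hypothetical use, not from this file; u64Base stands
 * for whatever base the addressing mode supplies): the sign-extending byte
 * fetchers are what a decoder reaches for when reading an 8-bit displacement
 * that takes part in 64-bit effective address arithmetic:
 *
 *     uint64_t u64Disp;
 *     IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Disp);   // disp8, sign-extended
 *     uint64_t GCPtrEff = u64Base + u64Disp;     // wraps modulo 2^64 as intended
 */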
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu16 Where to return the opcode word.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2319{
2320 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2321 if (rcStrict == VINF_SUCCESS)
2322 {
2323 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2324# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2325 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2326# else
2327 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2328# endif
2329 pVCpu->iem.s.offOpcode = offOpcode + 2;
2330 }
2331 else
2332 *pu16 = 0;
2333 return rcStrict;
2334}
2335
2336
2337/**
2338 * Fetches the next opcode word.
2339 *
2340 * @returns Strict VBox status code.
2341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2342 * @param pu16 Where to return the opcode word.
2343 */
2344DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2345{
2346 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2347 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2348 {
2349 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2350# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2351 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2352# else
2353 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2354# endif
2355 return VINF_SUCCESS;
2356 }
2357 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2358}
2359
2360#else /* IEM_WITH_SETJMP */
2361
2362/**
2363 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2364 *
2365 * @returns The opcode word.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 */
2368DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2369{
2370# ifdef IEM_WITH_CODE_TLB
2371 uint16_t u16;
2372 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2373 return u16;
2374# else
2375 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2376 if (rcStrict == VINF_SUCCESS)
2377 {
2378 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2379 pVCpu->iem.s.offOpcode += 2;
2380# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2381 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2382# else
2383 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2384# endif
2385 }
2386 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2387# endif
2388}
2389
2390
2391/**
2392 * Fetches the next opcode word, longjmp on error.
2393 *
2394 * @returns The opcode word.
2395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2396 */
2397DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2398{
2399# ifdef IEM_WITH_CODE_TLB
2400 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2401 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2402 if (RT_LIKELY( pbBuf != NULL
2403 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2404 {
2405 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2406# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2407 return *(uint16_t const *)&pbBuf[offBuf];
2408# else
2409 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2410# endif
2411 }
2412# else
2413 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2414 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2415 {
2416 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2419# else
2420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2421# endif
2422 }
2423# endif
2424 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2425}
2426
2427#endif /* IEM_WITH_SETJMP */
2428
2429
2430/**
2431 * Fetches the next opcode word, returns automatically on failure.
2432 *
2433 * @param a_pu16 Where to return the opcode word.
2434 * @remark Implicitly references pVCpu.
2435 */
2436#ifndef IEM_WITH_SETJMP
2437# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2438 do \
2439 { \
2440 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2441 if (rcStrict2 != VINF_SUCCESS) \
2442 return rcStrict2; \
2443 } while (0)
2444#else
2445# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2446#endif
2447
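/*
 * Illustrative sketch (hypothetical use, not from this file): a decoder for
 * an instruction with a 16-bit immediate, such as 'retn imm16' (opcode C2),
 * could fetch it like this:
 *
 *     uint16_t u16Imm;
 *     IEM_OPCODE_GET_NEXT_U16(&u16Imm);
 */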
2448#ifndef IEM_WITH_SETJMP
2449
2450/**
2451 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2452 *
2453 * @returns Strict VBox status code.
2454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2455 * @param pu32 Where to return the opcode double word.
2456 */
2457DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2458{
2459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2460 if (rcStrict == VINF_SUCCESS)
2461 {
2462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2463 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2464 pVCpu->iem.s.offOpcode = offOpcode + 2;
2465 }
2466 else
2467 *pu32 = 0;
2468 return rcStrict;
2469}
2470
2471
2472/**
2473 * Fetches the next opcode word, zero extending it to a double word.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2477 * @param pu32 Where to return the opcode double word.
2478 */
2479DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2480{
2481 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2482 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2483 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2484
2485 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2486 pVCpu->iem.s.offOpcode = offOpcode + 2;
2487 return VINF_SUCCESS;
2488}
2489
2490#endif /* !IEM_WITH_SETJMP */
2491
2492
2493/**
2494 * Fetches the next opcode word and zero extends it to a double word, returns
2495 * automatically on failure.
2496 *
2497 * @param a_pu32 Where to return the opcode double word.
2498 * @remark Implicitly references pVCpu.
2499 */
2500#ifndef IEM_WITH_SETJMP
2501# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2502 do \
2503 { \
2504 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2505 if (rcStrict2 != VINF_SUCCESS) \
2506 return rcStrict2; \
2507 } while (0)
2508#else
2509# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2510#endif
2511
2512#ifndef IEM_WITH_SETJMP
2513
2514/**
2515 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2516 *
2517 * @returns Strict VBox status code.
2518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2519 * @param pu64 Where to return the opcode quad word.
2520 */
2521DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2522{
2523 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2524 if (rcStrict == VINF_SUCCESS)
2525 {
2526 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2527 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528 pVCpu->iem.s.offOpcode = offOpcode + 2;
2529 }
2530 else
2531 *pu64 = 0;
2532 return rcStrict;
2533}
2534
2535
2536/**
2537 * Fetches the next opcode word, zero extending it to a quad word.
2538 *
2539 * @returns Strict VBox status code.
2540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2541 * @param pu64 Where to return the opcode quad word.
2542 */
2543DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2544{
2545 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2546 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2547 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2548
2549 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2550 pVCpu->iem.s.offOpcode = offOpcode + 2;
2551 return VINF_SUCCESS;
2552}
2553
2554#endif /* !IEM_WITH_SETJMP */
2555
2556/**
2557 * Fetches the next opcode word and zero extends it to a quad word, returns
2558 * automatically on failure.
2559 *
2560 * @param a_pu64 Where to return the opcode quad word.
2561 * @remark Implicitly references pVCpu.
2562 */
2563#ifndef IEM_WITH_SETJMP
2564# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2565 do \
2566 { \
2567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2568 if (rcStrict2 != VINF_SUCCESS) \
2569 return rcStrict2; \
2570 } while (0)
2571#else
2572# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2573#endif
2574
2575
2576#ifndef IEM_WITH_SETJMP
2577/**
2578 * Fetches the next signed word from the opcode stream.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pi16 Where to return the signed word.
2583 */
2584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2585{
2586 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2587}
2588#endif /* !IEM_WITH_SETJMP */
2589
2590
2591/**
2592 * Fetches the next signed word from the opcode stream, returning automatically
2593 * on failure.
2594 *
2595 * @param a_pi16 Where to return the signed word.
2596 * @remark Implicitly references pVCpu.
2597 */
2598#ifndef IEM_WITH_SETJMP
2599# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2600 do \
2601 { \
2602 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2603 if (rcStrict2 != VINF_SUCCESS) \
2604 return rcStrict2; \
2605 } while (0)
2606#else
2607# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2608#endif
2609
2610#ifndef IEM_WITH_SETJMP
2611
2612/**
2613 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2614 *
2615 * @returns Strict VBox status code.
2616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2617 * @param pu32 Where to return the opcode dword.
2618 */
2619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2620{
2621 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2622 if (rcStrict == VINF_SUCCESS)
2623 {
2624 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 pVCpu->iem.s.offOpcode = offOpcode + 4;
2634 }
2635 else
2636 *pu32 = 0;
2637 return rcStrict;
2638}
2639
2640
2641/**
2642 * Fetches the next opcode dword.
2643 *
2644 * @returns Strict VBox status code.
2645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2646 * @param pu32 Where to return the opcode double word.
2647 */
2648DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2649{
2650 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2651 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2652 {
2653 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2656# else
2657 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2658 pVCpu->iem.s.abOpcode[offOpcode + 1],
2659 pVCpu->iem.s.abOpcode[offOpcode + 2],
2660 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2661# endif
2662 return VINF_SUCCESS;
2663 }
2664 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2665}
2666
2667#else  /* IEM_WITH_SETJMP */
2668
2669/**
2670 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2671 *
2672 * @returns The opcode dword.
2673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2674 */
2675DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2676{
2677# ifdef IEM_WITH_CODE_TLB
2678 uint32_t u32;
2679 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2680 return u32;
2681# else
2682 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2683 if (rcStrict == VINF_SUCCESS)
2684 {
2685 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2686 pVCpu->iem.s.offOpcode = offOpcode + 4;
2687# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2688 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2689# else
2690 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2691 pVCpu->iem.s.abOpcode[offOpcode + 1],
2692 pVCpu->iem.s.abOpcode[offOpcode + 2],
2693 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2694# endif
2695 }
2696 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2697# endif
2698}
2699
2700
2701/**
2702 * Fetches the next opcode dword, longjmp on error.
2703 *
2704 * @returns The opcode dword.
2705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2706 */
2707DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2708{
2709# ifdef IEM_WITH_CODE_TLB
2710 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2711 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2712 if (RT_LIKELY( pbBuf != NULL
2713 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2714 {
2715 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2716# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2717 return *(uint32_t const *)&pbBuf[offBuf];
2718# else
2719 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2720 pbBuf[offBuf + 1],
2721 pbBuf[offBuf + 2],
2722 pbBuf[offBuf + 3]);
2723# endif
2724 }
2725# else
2726 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2727 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2728 {
2729 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2730# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2731 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2732# else
2733 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2734 pVCpu->iem.s.abOpcode[offOpcode + 1],
2735 pVCpu->iem.s.abOpcode[offOpcode + 2],
2736 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2737# endif
2738 }
2739# endif
2740 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2741}
2742
2743#endif /* IEM_WITH_SETJMP */
2744
2745
2746/**
2747 * Fetches the next opcode dword, returns automatically on failure.
2748 *
2749 * @param a_pu32 Where to return the opcode dword.
2750 * @remark Implicitly references pVCpu.
2751 */
2752#ifndef IEM_WITH_SETJMP
2753# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2754 do \
2755 { \
2756 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2757 if (rcStrict2 != VINF_SUCCESS) \
2758 return rcStrict2; \
2759 } while (0)
2760#else
2761# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2762#endif
2763
2764#ifndef IEM_WITH_SETJMP
2765
2766/**
2767 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2768 *
2769 * @returns Strict VBox status code.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 * @param pu64 Where to return the opcode dword.
2772 */
2773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2774{
2775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2776 if (rcStrict == VINF_SUCCESS)
2777 {
2778 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2779 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2780 pVCpu->iem.s.abOpcode[offOpcode + 1],
2781 pVCpu->iem.s.abOpcode[offOpcode + 2],
2782 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2783 pVCpu->iem.s.offOpcode = offOpcode + 4;
2784 }
2785 else
2786 *pu64 = 0;
2787 return rcStrict;
2788}
2789
2790
2791/**
2792 * Fetches the next opcode dword, zero extending it to a quad word.
2793 *
2794 * @returns Strict VBox status code.
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 * @param pu64 Where to return the opcode quad word.
2797 */
2798DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2799{
2800 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2801 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2802 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2803
2804 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2805 pVCpu->iem.s.abOpcode[offOpcode + 1],
2806 pVCpu->iem.s.abOpcode[offOpcode + 2],
2807 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2808 pVCpu->iem.s.offOpcode = offOpcode + 4;
2809 return VINF_SUCCESS;
2810}
2811
2812#endif /* !IEM_WITH_SETJMP */
2813
2814
2815/**
2816 * Fetches the next opcode dword and zero extends it to a quad word, returns
2817 * automatically on failure.
2818 *
2819 * @param a_pu64 Where to return the opcode quad word.
2820 * @remark Implicitly references pVCpu.
2821 */
2822#ifndef IEM_WITH_SETJMP
2823# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2824 do \
2825 { \
2826 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2827 if (rcStrict2 != VINF_SUCCESS) \
2828 return rcStrict2; \
2829 } while (0)
2830#else
2831# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2832#endif
2833
2834
2835#ifndef IEM_WITH_SETJMP
2836/**
2837 * Fetches the next signed double word from the opcode stream.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 * @param pi32 Where to return the signed double word.
2842 */
2843DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2844{
2845 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2846}
2847#endif
2848
2849/**
2850 * Fetches the next signed double word from the opcode stream, returning
2851 * automatically on failure.
2852 *
2853 * @param a_pi32 Where to return the signed double word.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param pu64 Where to return the opcode qword.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, sign extending it into a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2907
2908 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 *pu64 = i32;
2913 pVCpu->iem.s.offOpcode = offOpcode + 4;
2914 return VINF_SUCCESS;
2915}
2916
2917#endif /* !IEM_WITH_SETJMP */
2918
2919
2920/**
2921 * Fetches the next opcode double word and sign extends it to a quad word,
2922 * returns automatically on failure.
2923 *
2924 * @param a_pu64 Where to return the opcode quad word.
2925 * @remark Implicitly references pVCpu.
2926 */
2927#ifndef IEM_WITH_SETJMP
2928# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2929 do \
2930 { \
2931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2932 if (rcStrict2 != VINF_SUCCESS) \
2933 return rcStrict2; \
2934 } while (0)
2935#else
2936# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2937#endif
2938
2939#ifndef IEM_WITH_SETJMP
2940
2941/**
2942 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2943 *
2944 * @returns Strict VBox status code.
2945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2946 * @param pu64 Where to return the opcode qword.
2947 */
2948DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2949{
2950 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2951 if (rcStrict == VINF_SUCCESS)
2952 {
2953 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2954# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2955 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2956# else
2957 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2958 pVCpu->iem.s.abOpcode[offOpcode + 1],
2959 pVCpu->iem.s.abOpcode[offOpcode + 2],
2960 pVCpu->iem.s.abOpcode[offOpcode + 3],
2961 pVCpu->iem.s.abOpcode[offOpcode + 4],
2962 pVCpu->iem.s.abOpcode[offOpcode + 5],
2963 pVCpu->iem.s.abOpcode[offOpcode + 6],
2964 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2965# endif
2966 pVCpu->iem.s.offOpcode = offOpcode + 8;
2967 }
2968 else
2969 *pu64 = 0;
2970 return rcStrict;
2971}
2972
2973
2974/**
2975 * Fetches the next opcode qword.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2984 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2985 {
2986# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2987 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2988# else
2989 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2990 pVCpu->iem.s.abOpcode[offOpcode + 1],
2991 pVCpu->iem.s.abOpcode[offOpcode + 2],
2992 pVCpu->iem.s.abOpcode[offOpcode + 3],
2993 pVCpu->iem.s.abOpcode[offOpcode + 4],
2994 pVCpu->iem.s.abOpcode[offOpcode + 5],
2995 pVCpu->iem.s.abOpcode[offOpcode + 6],
2996 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2997# endif
2998 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2999 return VINF_SUCCESS;
3000 }
3001 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3002}
3003
3004#else /* IEM_WITH_SETJMP */
3005
3006/**
3007 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3008 *
3009 * @returns The opcode qword.
3010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3013{
3014# ifdef IEM_WITH_CODE_TLB
3015 uint64_t u64;
3016 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3017 return u64;
3018# else
3019 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3020 if (rcStrict == VINF_SUCCESS)
3021 {
3022 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3023 pVCpu->iem.s.offOpcode = offOpcode + 8;
3024# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3025 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3026# else
3027 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3028 pVCpu->iem.s.abOpcode[offOpcode + 1],
3029 pVCpu->iem.s.abOpcode[offOpcode + 2],
3030 pVCpu->iem.s.abOpcode[offOpcode + 3],
3031 pVCpu->iem.s.abOpcode[offOpcode + 4],
3032 pVCpu->iem.s.abOpcode[offOpcode + 5],
3033 pVCpu->iem.s.abOpcode[offOpcode + 6],
3034 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3035# endif
3036 }
3037 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3038# endif
3039}
3040
3041
3042/**
3043 * Fetches the next opcode qword, longjmp on error.
3044 *
3045 * @returns The opcode qword.
3046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3047 */
3048DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3049{
3050# ifdef IEM_WITH_CODE_TLB
3051 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3052 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3053 if (RT_LIKELY( pbBuf != NULL
3054 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3055 {
3056 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 return *(uint64_t const *)&pbBuf[offBuf];
3059# else
3060 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3061 pbBuf[offBuf + 1],
3062 pbBuf[offBuf + 2],
3063 pbBuf[offBuf + 3],
3064 pbBuf[offBuf + 4],
3065 pbBuf[offBuf + 5],
3066 pbBuf[offBuf + 6],
3067 pbBuf[offBuf + 7]);
3068# endif
3069 }
3070# else
3071 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3072 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3073 {
3074 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3075# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3076 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3077# else
3078 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3079 pVCpu->iem.s.abOpcode[offOpcode + 1],
3080 pVCpu->iem.s.abOpcode[offOpcode + 2],
3081 pVCpu->iem.s.abOpcode[offOpcode + 3],
3082 pVCpu->iem.s.abOpcode[offOpcode + 4],
3083 pVCpu->iem.s.abOpcode[offOpcode + 5],
3084 pVCpu->iem.s.abOpcode[offOpcode + 6],
3085 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3086# endif
3087 }
3088# endif
3089 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3090}
3091
3092#endif /* IEM_WITH_SETJMP */
3093
3094/**
3095 * Fetches the next opcode quad word, returns automatically on failure.
3096 *
3097 * @param a_pu64 Where to return the opcode quad word.
3098 * @remark Implicitly references pVCpu.
3099 */
3100#ifndef IEM_WITH_SETJMP
3101# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3102 do \
3103 { \
3104 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3105 if (rcStrict2 != VINF_SUCCESS) \
3106 return rcStrict2; \
3107 } while (0)
3108#else
3109# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3110#endif
3111
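/*
 * Illustrative sketch (hypothetical use, not from this file): a typical
 * consumer of a full 8-byte immediate is 'mov r64, imm64' (REX.W + B8+rd),
 * whose decoder could fetch it like this:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 */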
3112
3113/** @name Misc Worker Functions.
3114 * @{
3115 */
3116
3117
3118/**
3119 * Validates a new SS segment.
3120 *
3121 * @returns VBox strict status code.
3122 * @param pVCpu The cross context virtual CPU structure of the
3123 * calling thread.
3124 * @param pCtx The CPU context.
3125 * @param NewSS The new SS selector.
3126 * @param uCpl The CPL to load the stack for.
3127 * @param pDesc Where to return the descriptor.
3128 */
3129IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3130{
3131 NOREF(pCtx);
3132
3133 /* Null selectors are not allowed (we're not called for dispatching
3134 interrupts with SS=0 in long mode). */
3135 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3136 {
3137 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3138 return iemRaiseTaskSwitchFault0(pVCpu);
3139 }
3140
3141 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3142 if ((NewSS & X86_SEL_RPL) != uCpl)
3143 {
3144 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3145 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3146 }
3147
3148 /*
3149 * Read the descriptor.
3150 */
3151 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3152 if (rcStrict != VINF_SUCCESS)
3153 return rcStrict;
3154
3155 /*
3156 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3157 */
3158 if (!pDesc->Legacy.Gen.u1DescType)
3159 {
3160 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3161 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3162 }
3163
3164 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3165 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3166 {
3167        Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3168 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3169 }
3170 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3171 {
3172        Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3173 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3174 }
3175
3176 /* Is it there? */
3177 /** @todo testcase: Is this checked before the canonical / limit check below? */
3178 if (!pDesc->Legacy.Gen.u1Present)
3179 {
3180        Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3181 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3182 }
3183
3184 return VINF_SUCCESS;
3185}
3186
3187
3188/**
3189 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3190 * not.
3191 *
3192 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3193 * @param a_pCtx The CPU context.
3194 */
3195#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3196# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3197 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3198 ? (a_pCtx)->eflags.u \
3199 : CPUMRawGetEFlags(a_pVCpu) )
3200#else
3201# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3202 ( (a_pCtx)->eflags.u )
3203#endif
3204
3205/**
3206 * Updates the EFLAGS in the correct manner wrt. PATM.
3207 *
3208 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3209 * @param a_pCtx The CPU context.
3210 * @param a_fEfl The new EFLAGS.
3211 */
3212#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3213# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3214 do { \
3215 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3216 (a_pCtx)->eflags.u = (a_fEfl); \
3217 else \
3218 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3219 } while (0)
3220#else
3221# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3222 do { \
3223 (a_pCtx)->eflags.u = (a_fEfl); \
3224 } while (0)
3225#endif
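/* Typical read-modify-write use, as the exception delivery code below does it:
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 * This keeps raw-mode builds going through CPUM rather than touching eflags directly. */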
3226
3227
3228/** @} */
3229
3230/** @name Raising Exceptions.
3231 *
3232 * @{
3233 */
3234
3235/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3236 * @{ */
3237/** CPU exception. */
3238#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3239/** External interrupt (from PIC, APIC, whatever). */
3240#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3241/** Software interrupt (int or into, not bound).
3242 * Returns to the following instruction. */
3243#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3244/** Takes an error code. */
3245#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3246/** Takes a CR2. */
3247#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3248/** Generated by the breakpoint instruction. */
3249#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3250/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3251#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3252/** @} */
3253
3254
3255/**
3256 * Loads the specified stack far pointer from the TSS.
3257 *
3258 * @returns VBox strict status code.
3259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3260 * @param pCtx The CPU context.
3261 * @param uCpl The CPL to load the stack for.
3262 * @param pSelSS Where to return the new stack segment.
3263 * @param puEsp Where to return the new stack pointer.
3264 */
3265IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3266 PRTSEL pSelSS, uint32_t *puEsp)
3267{
3268 VBOXSTRICTRC rcStrict;
3269 Assert(uCpl < 4);
3270
3271 switch (pCtx->tr.Attr.n.u4Type)
3272 {
3273 /*
3274 * 16-bit TSS (X86TSS16).
3275 */
3276 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3277 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3278 {
3279 uint32_t off = uCpl * 4 + 2;
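            /* The 16-bit TSS keeps a 16-bit SP:SS pair per privilege level starting at
               offset 2 (sp0, ss0, sp1, ss1, ...); the 32-bit fetch below reads SP into
               the low word and SS into the high word in one access. */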
3280 if (off + 4 <= pCtx->tr.u32Limit)
3281 {
3282 /** @todo check actual access pattern here. */
3283 uint32_t u32Tmp = 0; /* gcc maybe... */
3284 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3285 if (rcStrict == VINF_SUCCESS)
3286 {
3287 *puEsp = RT_LOWORD(u32Tmp);
3288 *pSelSS = RT_HIWORD(u32Tmp);
3289 return VINF_SUCCESS;
3290 }
3291 }
3292 else
3293 {
3294 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3295 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3296 }
3297 break;
3298 }
3299
3300 /*
3301 * 32-bit TSS (X86TSS32).
3302 */
3303 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3304 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3305 {
3306 uint32_t off = uCpl * 8 + 4;
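            /* The 32-bit TSS keeps an ESP:SS pair per privilege level in 8 byte units
               starting at offset 4 (esp0 at 4, ss0 at 8, ...); the 64-bit fetch below
               reads ESP from the low dword and SS from the low word of the high dword. */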
3307 if (off + 7 <= pCtx->tr.u32Limit)
3308 {
3309            /** @todo check actual access pattern here. */
3310 uint64_t u64Tmp;
3311 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3312 if (rcStrict == VINF_SUCCESS)
3313 {
3314 *puEsp = u64Tmp & UINT32_MAX;
3315 *pSelSS = (RTSEL)(u64Tmp >> 32);
3316 return VINF_SUCCESS;
3317 }
3318 }
3319 else
3320 {
3321                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3322 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3323 }
3324 break;
3325 }
3326
3327 default:
3328 AssertFailed();
3329 rcStrict = VERR_IEM_IPE_4;
3330 break;
3331 }
3332
3333 *puEsp = 0; /* make gcc happy */
3334 *pSelSS = 0; /* make gcc happy */
3335 return rcStrict;
3336}
3337
3338
3339/**
3340 * Loads the specified stack pointer from the 64-bit TSS.
3341 *
3342 * @returns VBox strict status code.
3343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3344 * @param pCtx The CPU context.
3345 * @param uCpl The CPL to load the stack for.
3346 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3347 * @param puRsp Where to return the new stack pointer.
3348 */
3349IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3350{
3351 Assert(uCpl < 4);
3352 Assert(uIst < 8);
3353 *puRsp = 0; /* make gcc happy */
3354
3355 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3356
3357 uint32_t off;
3358 if (uIst)
3359 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3360 else
3361 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
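    /* The 64-bit TSS holds RSP0..RSP2 and IST1..IST7 as plain 64-bit fields, so a
       single 8 byte system read at the computed offset yields the new stack pointer. */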
3362 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3363 {
3364 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3365 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3366 }
3367
3368 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3369}
3370
3371
3372/**
3373 * Adjust the CPU state according to the exception being raised.
3374 *
3375 * @param pCtx The CPU context.
3376 * @param u8Vector The exception that has been raised.
3377 */
3378DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3379{
3380 switch (u8Vector)
3381 {
3382 case X86_XCPT_DB:
3383 pCtx->dr[7] &= ~X86_DR7_GD;
3384 break;
3385 /** @todo Read the AMD and Intel exception reference... */
3386 }
3387}
3388
3389
3390/**
3391 * Implements exceptions and interrupts for real mode.
3392 *
3393 * @returns VBox strict status code.
3394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3395 * @param pCtx The CPU context.
3396 * @param cbInstr The number of bytes to offset rIP by in the return
3397 * address.
3398 * @param u8Vector The interrupt / exception vector number.
3399 * @param fFlags The flags.
3400 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3401 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3402 */
3403IEM_STATIC VBOXSTRICTRC
3404iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3405 PCPUMCTX pCtx,
3406 uint8_t cbInstr,
3407 uint8_t u8Vector,
3408 uint32_t fFlags,
3409 uint16_t uErr,
3410 uint64_t uCr2)
3411{
3412 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3413 NOREF(uErr); NOREF(uCr2);
3414
3415 /*
3416 * Read the IDT entry.
3417 */
3418 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3419 {
3420 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3421 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3422 }
3423 RTFAR16 Idte;
3424 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3425 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3426 return rcStrict;
3427
3428 /*
3429 * Push the stack frame.
3430 */
3431 uint16_t *pu16Frame;
3432 uint64_t uNewRsp;
3433 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436
3437 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3438#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3439 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3440 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3441 fEfl |= UINT16_C(0xf000);
3442#endif
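    /* Real-mode interrupt frame, lowest address first: IP, CS, FLAGS (what IRET will
       pop). For software interrupts the pushed IP points past the INT instruction so
       execution resumes after it. */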
3443 pu16Frame[2] = (uint16_t)fEfl;
3444 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3445 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3446 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3447 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3448 return rcStrict;
3449
3450 /*
3451 * Load the vector address into cs:ip and make exception specific state
3452 * adjustments.
3453 */
3454 pCtx->cs.Sel = Idte.sel;
3455 pCtx->cs.ValidSel = Idte.sel;
3456 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3457 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3458 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3459 pCtx->rip = Idte.off;
3460 fEfl &= ~X86_EFL_IF;
3461 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3462
3463 /** @todo do we actually do this in real mode? */
3464 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3465 iemRaiseXcptAdjustState(pCtx, u8Vector);
3466
3467 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3468}
3469
3470
3471/**
3472 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3473 *
3474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3475 * @param pSReg Pointer to the segment register.
3476 */
3477IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3478{
3479 pSReg->Sel = 0;
3480 pSReg->ValidSel = 0;
3481 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3482 {
3483        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3484 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3485 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3486 }
3487 else
3488 {
3489 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3490 /** @todo check this on AMD-V */
3491 pSReg->u64Base = 0;
3492 pSReg->u32Limit = 0;
3493 }
3494}
3495
3496
3497/**
3498 * Loads a segment selector during a task switch in V8086 mode.
3499 *
3500 * @param pSReg Pointer to the segment register.
3501 * @param uSel The selector value to load.
3502 */
3503IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3504{
3505 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3506 pSReg->Sel = uSel;
3507 pSReg->ValidSel = uSel;
3508 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3509 pSReg->u64Base = uSel << 4;
3510 pSReg->u32Limit = 0xffff;
3511 pSReg->Attr.u = 0xf3;
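    /* 0xf3 = present, DPL=3, non-system, read/write accessed data segment - the fixed
       attribute set V8086-mode segments are defined to have. */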
3512}
3513
3514
3515/**
3516 * Loads a NULL data selector into a selector register, both the hidden and
3517 * visible parts, in protected mode.
3518 *
3519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3520 * @param pSReg Pointer to the segment register.
3521 * @param uRpl The RPL.
3522 */
3523IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3524{
3525    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3526 * data selector in protected mode. */
3527 pSReg->Sel = uRpl;
3528 pSReg->ValidSel = uRpl;
3529 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3530 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3531 {
3532 /* VT-x (Intel 3960x) observed doing something like this. */
3533 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3534 pSReg->u32Limit = UINT32_MAX;
3535 pSReg->u64Base = 0;
3536 }
3537 else
3538 {
3539 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3540 pSReg->u32Limit = 0;
3541 pSReg->u64Base = 0;
3542 }
3543}
3544
3545
3546/**
3547 * Loads a segment selector during a task switch in protected mode.
3548 *
3549 * In this task switch scenario, we would throw \#TS exceptions rather than
3550 * \#GPs.
3551 *
3552 * @returns VBox strict status code.
3553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3554 * @param pSReg Pointer to the segment register.
3555 * @param uSel The new selector value.
3556 *
3557 * @remarks This does _not_ handle CS or SS.
3558 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3559 */
3560IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3561{
3562 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3563
3564 /* Null data selector. */
3565 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3566 {
3567 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3568 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3569 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3570 return VINF_SUCCESS;
3571 }
3572
3573 /* Fetch the descriptor. */
3574 IEMSELDESC Desc;
3575 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3576 if (rcStrict != VINF_SUCCESS)
3577 {
3578 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3579 VBOXSTRICTRC_VAL(rcStrict)));
3580 return rcStrict;
3581 }
3582
3583 /* Must be a data segment or readable code segment. */
3584 if ( !Desc.Legacy.Gen.u1DescType
3585 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3586 {
3587 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3588 Desc.Legacy.Gen.u4Type));
3589 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3590 }
3591
3592 /* Check privileges for data segments and non-conforming code segments. */
3593 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3594 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3595 {
3596 /* The RPL and the new CPL must be less than or equal to the DPL. */
3597 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3598 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3599 {
3600 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3601 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3602 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3603 }
3604 }
3605
3606 /* Is it there? */
3607 if (!Desc.Legacy.Gen.u1Present)
3608 {
3609 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3610 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3611 }
3612
3613 /* The base and limit. */
3614 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3615 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3616
3617 /*
3618 * Ok, everything checked out fine. Now set the accessed bit before
3619 * committing the result into the registers.
3620 */
3621 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3622 {
3623 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3624 if (rcStrict != VINF_SUCCESS)
3625 return rcStrict;
3626 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3627 }
3628
3629 /* Commit */
3630 pSReg->Sel = uSel;
3631 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3632 pSReg->u32Limit = cbLimit;
3633 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3634 pSReg->ValidSel = uSel;
3635 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3636 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3637 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3638
3639 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3640 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3641 return VINF_SUCCESS;
3642}
3643
3644
3645/**
3646 * Performs a task switch.
3647 *
3648 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3649 * caller is responsible for performing the necessary checks (like DPL, TSS
3650 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3651 * reference for JMP, CALL, IRET.
3652 *
3653 * If the task switch is due to a software interrupt or hardware exception,
3654 * the caller is responsible for validating the TSS selector and descriptor. See
3655 * Intel Instruction reference for INT n.
3656 *
3657 * @returns VBox strict status code.
3658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3659 * @param pCtx The CPU context.
3660 * @param enmTaskSwitch What caused this task switch.
3661 * @param uNextEip The EIP effective after the task switch.
3662 * @param fFlags The flags.
3663 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3664 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3665 * @param SelTSS The TSS selector of the new task.
3666 * @param pNewDescTSS Pointer to the new TSS descriptor.
3667 */
3668IEM_STATIC VBOXSTRICTRC
3669iemTaskSwitch(PVMCPU pVCpu,
3670 PCPUMCTX pCtx,
3671 IEMTASKSWITCH enmTaskSwitch,
3672 uint32_t uNextEip,
3673 uint32_t fFlags,
3674 uint16_t uErr,
3675 uint64_t uCr2,
3676 RTSEL SelTSS,
3677 PIEMSELDESC pNewDescTSS)
3678{
3679 Assert(!IEM_IS_REAL_MODE(pVCpu));
3680 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3681
3682 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3683 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3684 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3685 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3686 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3687
3688 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3689 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3690
3691 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3692 fIsNewTSS386, pCtx->eip, uNextEip));
3693
3694 /* Update CR2 in case it's a page-fault. */
3695 /** @todo This should probably be done much earlier in IEM/PGM. See
3696 * @bugref{5653#c49}. */
3697 if (fFlags & IEM_XCPT_FLAGS_CR2)
3698 pCtx->cr2 = uCr2;
3699
3700 /*
3701 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3702 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3703 */
3704 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3705 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3706 if (uNewTSSLimit < uNewTSSLimitMin)
3707 {
3708 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3709 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3710 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3711 }
3712
3713 /*
3714 * Check the current TSS limit. The last written byte to the current TSS during the
3715 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3716 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3717 *
3718 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3719 * end up with smaller than "legal" TSS limits.
3720 */
3721 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3722 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3723 if (uCurTSSLimit < uCurTSSLimitMin)
3724 {
3725 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3726 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3727 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3728 }
3729
3730 /*
3731 * Verify that the new TSS can be accessed and map it. Map only the required contents
3732 * and not the entire TSS.
3733 */
3734 void *pvNewTSS;
3735 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3736 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3737 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3738 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3739 * not perform correct translation if this happens. See Intel spec. 7.2.1
3740 * "Task-State Segment" */
3741 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3742 if (rcStrict != VINF_SUCCESS)
3743 {
3744 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3745 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3746 return rcStrict;
3747 }
3748
3749 /*
3750 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3751 */
3752 uint32_t u32EFlags = pCtx->eflags.u32;
3753 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3754 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3755 {
3756 PX86DESC pDescCurTSS;
3757 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3758 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3759 if (rcStrict != VINF_SUCCESS)
3760 {
3761 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3762 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3763 return rcStrict;
3764 }
3765
3766 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3767 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3768 if (rcStrict != VINF_SUCCESS)
3769 {
3770 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3771 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3772 return rcStrict;
3773 }
3774
3775 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3776 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3777 {
3778 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3779 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3780 u32EFlags &= ~X86_EFL_NT;
3781 }
3782 }
3783
3784 /*
3785 * Save the CPU state into the current TSS.
3786 */
3787 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3788 if (GCPtrNewTSS == GCPtrCurTSS)
3789 {
3790 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3791 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3792 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3793 }
3794 if (fIsNewTSS386)
3795 {
3796 /*
3797 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3798 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3799 */
3800 void *pvCurTSS32;
3801 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3802 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3803 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3804 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3805 if (rcStrict != VINF_SUCCESS)
3806 {
3807 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3808 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3809 return rcStrict;
3810 }
3811
3812        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3813 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3814 pCurTSS32->eip = uNextEip;
3815 pCurTSS32->eflags = u32EFlags;
3816 pCurTSS32->eax = pCtx->eax;
3817 pCurTSS32->ecx = pCtx->ecx;
3818 pCurTSS32->edx = pCtx->edx;
3819 pCurTSS32->ebx = pCtx->ebx;
3820 pCurTSS32->esp = pCtx->esp;
3821 pCurTSS32->ebp = pCtx->ebp;
3822 pCurTSS32->esi = pCtx->esi;
3823 pCurTSS32->edi = pCtx->edi;
3824 pCurTSS32->es = pCtx->es.Sel;
3825 pCurTSS32->cs = pCtx->cs.Sel;
3826 pCurTSS32->ss = pCtx->ss.Sel;
3827 pCurTSS32->ds = pCtx->ds.Sel;
3828 pCurTSS32->fs = pCtx->fs.Sel;
3829 pCurTSS32->gs = pCtx->gs.Sel;
3830
3831 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3832 if (rcStrict != VINF_SUCCESS)
3833 {
3834 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3835 VBOXSTRICTRC_VAL(rcStrict)));
3836 return rcStrict;
3837 }
3838 }
3839 else
3840 {
3841 /*
3842 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3843 */
3844 void *pvCurTSS16;
3845 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3846 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3847 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3848 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3849 if (rcStrict != VINF_SUCCESS)
3850 {
3851 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3852 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3853 return rcStrict;
3854 }
3855
3856        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3857 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3858 pCurTSS16->ip = uNextEip;
3859 pCurTSS16->flags = u32EFlags;
3860 pCurTSS16->ax = pCtx->ax;
3861 pCurTSS16->cx = pCtx->cx;
3862 pCurTSS16->dx = pCtx->dx;
3863 pCurTSS16->bx = pCtx->bx;
3864 pCurTSS16->sp = pCtx->sp;
3865 pCurTSS16->bp = pCtx->bp;
3866 pCurTSS16->si = pCtx->si;
3867 pCurTSS16->di = pCtx->di;
3868 pCurTSS16->es = pCtx->es.Sel;
3869 pCurTSS16->cs = pCtx->cs.Sel;
3870 pCurTSS16->ss = pCtx->ss.Sel;
3871 pCurTSS16->ds = pCtx->ds.Sel;
3872
3873 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3874 if (rcStrict != VINF_SUCCESS)
3875 {
3876 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3877 VBOXSTRICTRC_VAL(rcStrict)));
3878 return rcStrict;
3879 }
3880 }
3881
3882 /*
3883 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3884 */
3885 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3886 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3887 {
3888 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3889 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3890 pNewTSS->selPrev = pCtx->tr.Sel;
3891 }
3892
3893 /*
3894     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
3895     * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3896 */
3897 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3898 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3899 bool fNewDebugTrap;
3900 if (fIsNewTSS386)
3901 {
3902 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3903 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3904 uNewEip = pNewTSS32->eip;
3905 uNewEflags = pNewTSS32->eflags;
3906 uNewEax = pNewTSS32->eax;
3907 uNewEcx = pNewTSS32->ecx;
3908 uNewEdx = pNewTSS32->edx;
3909 uNewEbx = pNewTSS32->ebx;
3910 uNewEsp = pNewTSS32->esp;
3911 uNewEbp = pNewTSS32->ebp;
3912 uNewEsi = pNewTSS32->esi;
3913 uNewEdi = pNewTSS32->edi;
3914 uNewES = pNewTSS32->es;
3915 uNewCS = pNewTSS32->cs;
3916 uNewSS = pNewTSS32->ss;
3917 uNewDS = pNewTSS32->ds;
3918 uNewFS = pNewTSS32->fs;
3919 uNewGS = pNewTSS32->gs;
3920 uNewLdt = pNewTSS32->selLdt;
3921 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3922 }
3923 else
3924 {
3925 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3926 uNewCr3 = 0;
3927 uNewEip = pNewTSS16->ip;
3928 uNewEflags = pNewTSS16->flags;
3929 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3930 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3931 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3932 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3933 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3934 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3935 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3936 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3937 uNewES = pNewTSS16->es;
3938 uNewCS = pNewTSS16->cs;
3939 uNewSS = pNewTSS16->ss;
3940 uNewDS = pNewTSS16->ds;
3941 uNewFS = 0;
3942 uNewGS = 0;
3943 uNewLdt = pNewTSS16->selLdt;
3944 fNewDebugTrap = false;
3945 }
3946
3947 if (GCPtrNewTSS == GCPtrCurTSS)
3948 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3949 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3950
3951 /*
3952 * We're done accessing the new TSS.
3953 */
3954 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3955 if (rcStrict != VINF_SUCCESS)
3956 {
3957 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3958 return rcStrict;
3959 }
3960
3961 /*
3962 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3963 */
3964 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3965 {
3966 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3967 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3968 if (rcStrict != VINF_SUCCESS)
3969 {
3970 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3971 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3972 return rcStrict;
3973 }
3974
3975 /* Check that the descriptor indicates the new TSS is available (not busy). */
3976 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3977 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3978 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3979
3980 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3981 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3982 if (rcStrict != VINF_SUCCESS)
3983 {
3984 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3985 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3986 return rcStrict;
3987 }
3988 }
3989
3990 /*
3991     * From this point on, we're technically in the new task. Exceptions raised from here on are deferred
3992     * until the task switch completes, but are delivered before any instruction in the new task executes.
3993 */
3994 pCtx->tr.Sel = SelTSS;
3995 pCtx->tr.ValidSel = SelTSS;
3996 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3997 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3998 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3999 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4000 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4001
4002 /* Set the busy bit in TR. */
4003 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4004 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4005 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4006 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4007 {
4008 uNewEflags |= X86_EFL_NT;
4009 }
4010
4011 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4012 pCtx->cr0 |= X86_CR0_TS;
4013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4014
4015 pCtx->eip = uNewEip;
4016 pCtx->eax = uNewEax;
4017 pCtx->ecx = uNewEcx;
4018 pCtx->edx = uNewEdx;
4019 pCtx->ebx = uNewEbx;
4020 pCtx->esp = uNewEsp;
4021 pCtx->ebp = uNewEbp;
4022 pCtx->esi = uNewEsi;
4023 pCtx->edi = uNewEdi;
4024
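    /* Sanitize the flags loaded from the TSS: keep only the architecturally defined
       bits and force reserved bit 1, which always reads as one. */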
4025 uNewEflags &= X86_EFL_LIVE_MASK;
4026 uNewEflags |= X86_EFL_RA1_MASK;
4027 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4028
4029 /*
4030 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4031 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4032 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4033 */
4034 pCtx->es.Sel = uNewES;
4035 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4036
4037 pCtx->cs.Sel = uNewCS;
4038 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4039
4040 pCtx->ss.Sel = uNewSS;
4041 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4042
4043 pCtx->ds.Sel = uNewDS;
4044 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4045
4046 pCtx->fs.Sel = uNewFS;
4047 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4048
4049 pCtx->gs.Sel = uNewGS;
4050 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4051 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4052
4053 pCtx->ldtr.Sel = uNewLdt;
4054 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4055 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4056 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4057
4058 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4059 {
4060 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4061 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4062 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4063 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4064 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4065 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4066 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4067 }
4068
4069 /*
4070 * Switch CR3 for the new task.
4071 */
4072 if ( fIsNewTSS386
4073 && (pCtx->cr0 & X86_CR0_PG))
4074 {
4075 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4076 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4077 {
4078 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4079 AssertRCSuccessReturn(rc, rc);
4080 }
4081 else
4082 pCtx->cr3 = uNewCr3;
4083
4084 /* Inform PGM. */
4085 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4086 {
4087 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4088 AssertRCReturn(rc, rc);
4089 /* ignore informational status codes */
4090 }
4091 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4092 }
4093
4094 /*
4095 * Switch LDTR for the new task.
4096 */
4097 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4098 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4099 else
4100 {
4101 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4102
4103 IEMSELDESC DescNewLdt;
4104 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4105 if (rcStrict != VINF_SUCCESS)
4106 {
4107 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4108 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4109 return rcStrict;
4110 }
4111 if ( !DescNewLdt.Legacy.Gen.u1Present
4112 || DescNewLdt.Legacy.Gen.u1DescType
4113 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4114 {
4115 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4116 uNewLdt, DescNewLdt.Legacy.u));
4117 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4118 }
4119
4120 pCtx->ldtr.ValidSel = uNewLdt;
4121 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4122 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4123 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4124 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4125 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4126 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4128 }
4129
4130 IEMSELDESC DescSS;
4131 if (IEM_IS_V86_MODE(pVCpu))
4132 {
4133 pVCpu->iem.s.uCpl = 3;
4134 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4135 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4136 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4137 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4138 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4139 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4140
4141 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4142 DescSS.Legacy.u = 0;
4143 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4144 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4145 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4146 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4147 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4148 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4149 DescSS.Legacy.Gen.u2Dpl = 3;
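        /* Note: this synthesizes a writable, accessed, DPL 3 data descriptor mirroring
           the just-loaded V8086 SS so the expand-up limit checks for the error code
           push further down behave sensibly. */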
4150 }
4151 else
4152 {
4153 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4154
4155 /*
4156 * Load the stack segment for the new task.
4157 */
4158 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4159 {
4160 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4161 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4162 }
4163
4164 /* Fetch the descriptor. */
4165 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4166 if (rcStrict != VINF_SUCCESS)
4167 {
4168 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4169 VBOXSTRICTRC_VAL(rcStrict)));
4170 return rcStrict;
4171 }
4172
4173 /* SS must be a data segment and writable. */
4174 if ( !DescSS.Legacy.Gen.u1DescType
4175 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4176 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4177 {
4178 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4179 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4180 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4181 }
4182
4183 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4184 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4185 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4186 {
4187 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4188 uNewCpl));
4189 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4190 }
4191
4192 /* Is it there? */
4193 if (!DescSS.Legacy.Gen.u1Present)
4194 {
4195 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4196 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4197 }
4198
4199 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4200 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4201
4202 /* Set the accessed bit before committing the result into SS. */
4203 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4204 {
4205 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4206 if (rcStrict != VINF_SUCCESS)
4207 return rcStrict;
4208 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4209 }
4210
4211 /* Commit SS. */
4212 pCtx->ss.Sel = uNewSS;
4213 pCtx->ss.ValidSel = uNewSS;
4214 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4215 pCtx->ss.u32Limit = cbLimit;
4216 pCtx->ss.u64Base = u64Base;
4217 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4219
4220 /* CPL has changed, update IEM before loading rest of segments. */
4221 pVCpu->iem.s.uCpl = uNewCpl;
4222
4223 /*
4224 * Load the data segments for the new task.
4225 */
4226 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4227 if (rcStrict != VINF_SUCCESS)
4228 return rcStrict;
4229 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4230 if (rcStrict != VINF_SUCCESS)
4231 return rcStrict;
4232 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4233 if (rcStrict != VINF_SUCCESS)
4234 return rcStrict;
4235 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238
4239 /*
4240 * Load the code segment for the new task.
4241 */
4242 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4243 {
4244 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4245 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4246 }
4247
4248 /* Fetch the descriptor. */
4249 IEMSELDESC DescCS;
4250 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4251 if (rcStrict != VINF_SUCCESS)
4252 {
4253 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4254 return rcStrict;
4255 }
4256
4257 /* CS must be a code segment. */
4258 if ( !DescCS.Legacy.Gen.u1DescType
4259 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4260 {
4261 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4262 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4263 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4264 }
4265
4266 /* For conforming CS, DPL must be less than or equal to the RPL. */
4267 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4268 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4269 {
4270            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4271 DescCS.Legacy.Gen.u2Dpl));
4272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4273 }
4274
4275 /* For non-conforming CS, DPL must match RPL. */
4276 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4277 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4278 {
4279            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4280 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4281 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4282 }
4283
4284 /* Is it there? */
4285 if (!DescCS.Legacy.Gen.u1Present)
4286 {
4287 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4288 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4289 }
4290
4291 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4292 u64Base = X86DESC_BASE(&DescCS.Legacy);
4293
4294 /* Set the accessed bit before committing the result into CS. */
4295 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4296 {
4297 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4298 if (rcStrict != VINF_SUCCESS)
4299 return rcStrict;
4300 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4301 }
4302
4303 /* Commit CS. */
4304 pCtx->cs.Sel = uNewCS;
4305 pCtx->cs.ValidSel = uNewCS;
4306 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4307 pCtx->cs.u32Limit = cbLimit;
4308 pCtx->cs.u64Base = u64Base;
4309 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4311 }
4312
4313 /** @todo Debug trap. */
4314 if (fIsNewTSS386 && fNewDebugTrap)
4315 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4316
4317 /*
4318 * Construct the error code masks based on what caused this task switch.
4319 * See Intel Instruction reference for INT.
4320 */
4321 uint16_t uExt;
4322 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4323 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4324 {
4325 uExt = 1;
4326 }
4327 else
4328 uExt = 0;
4329
4330 /*
4331 * Push any error code on to the new stack.
4332 */
4333 if (fFlags & IEM_XCPT_FLAGS_ERR)
4334 {
4335 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4336 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4337 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4338
4339 /* Check that there is sufficient space on the stack. */
4340 /** @todo Factor out segment limit checking for normal/expand down segments
4341 * into a separate function. */
4342 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4343 {
4344 if ( pCtx->esp - 1 > cbLimitSS
4345 || pCtx->esp < cbStackFrame)
4346 {
4347 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4348 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4349 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4350 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4351 }
4352 }
4353 else
4354 {
4355 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4356 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4357 {
4358 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4359 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4360 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4361 }
4362 }
4363
4364
4365 if (fIsNewTSS386)
4366 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4367 else
4368 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4369 if (rcStrict != VINF_SUCCESS)
4370 {
4371 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4372 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4373 return rcStrict;
4374 }
4375 }
4376
4377 /* Check the new EIP against the new CS limit. */
4378 if (pCtx->eip > pCtx->cs.u32Limit)
4379 {
4380        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4381 pCtx->eip, pCtx->cs.u32Limit));
4382 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4383 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4384 }
4385
4386 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4387 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4388}
4389
4390
4391/**
4392 * Implements exceptions and interrupts for protected mode.
4393 *
4394 * @returns VBox strict status code.
4395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4396 * @param pCtx The CPU context.
4397 * @param cbInstr The number of bytes to offset rIP by in the return
4398 * address.
4399 * @param u8Vector The interrupt / exception vector number.
4400 * @param fFlags The flags.
4401 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4402 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4403 */
4404IEM_STATIC VBOXSTRICTRC
4405iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4406 PCPUMCTX pCtx,
4407 uint8_t cbInstr,
4408 uint8_t u8Vector,
4409 uint32_t fFlags,
4410 uint16_t uErr,
4411 uint64_t uCr2)
4412{
4413 /*
4414 * Read the IDT entry.
4415 */
4416 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4417 {
4418 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4419 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4420 }
4421 X86DESC Idte;
4422 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4423 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4424 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4425 return rcStrict;
4426 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4427 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4428 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4429
4430 /*
4431 * Check the descriptor type, DPL and such.
4432 * ASSUMES this is done in the same order as described for call-gate calls.
4433 */
4434 if (Idte.Gate.u1DescType)
4435 {
4436 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4437 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4438 }
4439 bool fTaskGate = false;
4440 uint8_t f32BitGate = true;
4441 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4442 switch (Idte.Gate.u4Type)
4443 {
4444 case X86_SEL_TYPE_SYS_UNDEFINED:
4445 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4446 case X86_SEL_TYPE_SYS_LDT:
4447 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4448 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4449 case X86_SEL_TYPE_SYS_UNDEFINED2:
4450 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4451 case X86_SEL_TYPE_SYS_UNDEFINED3:
4452 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4453 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4454 case X86_SEL_TYPE_SYS_UNDEFINED4:
4455 {
4456 /** @todo check what actually happens when the type is wrong...
4457 * esp. call gates. */
4458 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4459 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4460 }
4461
4462 case X86_SEL_TYPE_SYS_286_INT_GATE:
4463 f32BitGate = false;
4464 /* fall thru */
4465 case X86_SEL_TYPE_SYS_386_INT_GATE:
4466 fEflToClear |= X86_EFL_IF;
4467 break;
4468
4469 case X86_SEL_TYPE_SYS_TASK_GATE:
4470 fTaskGate = true;
4471#ifndef IEM_IMPLEMENTS_TASKSWITCH
4472 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4473#endif
4474 break;
4475
4476 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4477 f32BitGate = false;
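            /* fall thru */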
4478 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4479 break;
4480
4481 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4482 }
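    /* Summary: interrupt gates additionally clear IF on delivery (added to fEflToClear
       above), trap gates leave IF as it is, and task gates divert to a full task switch
       below; the 286 gate variants simply use 16-bit frames and offsets. */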
4483
4484 /* Check DPL against CPL if applicable. */
4485 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4486 {
4487 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4488 {
4489 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4490 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4491 }
4492 }
4493
4494 /* Is it there? */
4495 if (!Idte.Gate.u1Present)
4496 {
4497 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4498 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4499 }
4500
4501 /* Is it a task-gate? */
4502 if (fTaskGate)
4503 {
4504 /*
4505 * Construct the error code masks based on what caused this task switch.
4506 * See Intel Instruction reference for INT.
4507 */
4508 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4509 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4510 RTSEL SelTSS = Idte.Gate.u16Sel;
4511
4512 /*
4513 * Fetch the TSS descriptor in the GDT.
4514 */
4515 IEMSELDESC DescTSS;
4516 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4517 if (rcStrict != VINF_SUCCESS)
4518 {
4519 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4520 VBOXSTRICTRC_VAL(rcStrict)));
4521 return rcStrict;
4522 }
4523
4524 /* The TSS descriptor must be a system segment and be available (not busy). */
4525 if ( DescTSS.Legacy.Gen.u1DescType
4526 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4527 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4528 {
4529 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4530 u8Vector, SelTSS, DescTSS.Legacy.au64));
4531 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4532 }
4533
4534 /* The TSS must be present. */
4535 if (!DescTSS.Legacy.Gen.u1Present)
4536 {
4537 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4538 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4539 }
4540
4541 /* Do the actual task switch. */
4542 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4543 }
4544
4545 /* A null CS is bad. */
4546 RTSEL NewCS = Idte.Gate.u16Sel;
4547 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4548 {
4549 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4550 return iemRaiseGeneralProtectionFault0(pVCpu);
4551 }
4552
4553 /* Fetch the descriptor for the new CS. */
4554 IEMSELDESC DescCS;
4555 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4556 if (rcStrict != VINF_SUCCESS)
4557 {
4558 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4559 return rcStrict;
4560 }
4561
4562 /* Must be a code segment. */
4563 if (!DescCS.Legacy.Gen.u1DescType)
4564 {
4565 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4566 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4567 }
4568 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4569 {
4570 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4571 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4572 }
4573
4574 /* Don't allow lowering the privilege level. */
4575 /** @todo Does the lowering of privileges apply to software interrupts
4576 * only? This has bearings on the more-privileged or
4577 * same-privilege stack behavior further down. A testcase would
4578 * be nice. */
4579 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4580 {
4581 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4582 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4583 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4584 }
4585
4586 /* Make sure the selector is present. */
4587 if (!DescCS.Legacy.Gen.u1Present)
4588 {
4589 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4590 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4591 }
4592
4593 /* Check the new EIP against the new CS limit. */
4594 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4595 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4596 ? Idte.Gate.u16OffsetLow
4597 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4598 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4599 if (uNewEip > cbLimitCS)
4600 {
4601 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4602 u8Vector, uNewEip, cbLimitCS, NewCS));
4603 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4604 }
4605 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4606
4607 /* Calc the flag image to push. */
4608 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4609 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4610 fEfl &= ~X86_EFL_RF;
4611 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4612 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4613
4614 /* From V8086 mode only go to CPL 0. */
4615 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4616 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
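    /* Note: a conforming code segment keeps running at the caller's CPL, otherwise the
       handler runs at the target descriptor's DPL (computed above). */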
4617 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4618 {
4619 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4620 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4621 }
4622
4623 /*
4624 * If the privilege level changes, we need to get a new stack from the TSS.
4625 * This in turns means validating the new SS and ESP...
4626 */
4627 if (uNewCpl != pVCpu->iem.s.uCpl)
4628 {
4629 RTSEL NewSS;
4630 uint32_t uNewEsp;
4631 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4632 if (rcStrict != VINF_SUCCESS)
4633 return rcStrict;
4634
4635 IEMSELDESC DescSS;
4636 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4637 if (rcStrict != VINF_SUCCESS)
4638 return rcStrict;
4639 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4640 if (!DescSS.Legacy.Gen.u1DefBig)
4641 {
4642 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4643 uNewEsp = (uint16_t)uNewEsp;
4644 }
4645
4646 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4647
4648 /* Check that there is sufficient space for the stack frame. */
4649 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
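        /* Frame size: EIP, CS, EFLAGS, ESP and SS (5 entries, +1 with an error code),
           plus ES, DS, FS and GS when coming from V8086 mode (9/10 entries); each
           entry is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate. */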
4650 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4651 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4652 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4653
4654 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4655 {
4656 if ( uNewEsp - 1 > cbLimitSS
4657 || uNewEsp < cbStackFrame)
4658 {
4659 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4660 u8Vector, NewSS, uNewEsp, cbStackFrame));
4661 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4662 }
4663 }
4664 else
4665 {
4666 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4667 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4668 {
4669 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4670 u8Vector, NewSS, uNewEsp, cbStackFrame));
4671 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4672 }
4673 }
4674
4675 /*
4676 * Start making changes.
4677 */
4678
4679 /* Set the new CPL so that stack accesses use it. */
4680 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4681 pVCpu->iem.s.uCpl = uNewCpl;
4682
4683 /* Create the stack frame. */
4684 RTPTRUNION uStackFrame;
4685 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4686 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4687 if (rcStrict != VINF_SUCCESS)
4688 return rcStrict;
4689 void * const pvStackFrame = uStackFrame.pv;
4690 if (f32BitGate)
4691 {
4692 if (fFlags & IEM_XCPT_FLAGS_ERR)
4693 *uStackFrame.pu32++ = uErr;
4694 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4695 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4696 uStackFrame.pu32[2] = fEfl;
4697 uStackFrame.pu32[3] = pCtx->esp;
4698 uStackFrame.pu32[4] = pCtx->ss.Sel;
4699 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4700 if (fEfl & X86_EFL_VM)
4701 {
4702 uStackFrame.pu32[1] = pCtx->cs.Sel;
4703 uStackFrame.pu32[5] = pCtx->es.Sel;
4704 uStackFrame.pu32[6] = pCtx->ds.Sel;
4705 uStackFrame.pu32[7] = pCtx->fs.Sel;
4706 uStackFrame.pu32[8] = pCtx->gs.Sel;
4707 }
4708 }
4709 else
4710 {
4711 if (fFlags & IEM_XCPT_FLAGS_ERR)
4712 *uStackFrame.pu16++ = uErr;
4713 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4714 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4715 uStackFrame.pu16[2] = fEfl;
4716 uStackFrame.pu16[3] = pCtx->sp;
4717 uStackFrame.pu16[4] = pCtx->ss.Sel;
4718 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4719 if (fEfl & X86_EFL_VM)
4720 {
4721 uStackFrame.pu16[1] = pCtx->cs.Sel;
4722 uStackFrame.pu16[5] = pCtx->es.Sel;
4723 uStackFrame.pu16[6] = pCtx->ds.Sel;
4724 uStackFrame.pu16[7] = pCtx->fs.Sel;
4725 uStackFrame.pu16[8] = pCtx->gs.Sel;
4726 }
4727 }
4728 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4729 if (rcStrict != VINF_SUCCESS)
4730 return rcStrict;
4731
4732 /* Mark the selectors 'accessed' (hope this is the correct time). */
4733        /** @todo testcase: exactly _when_ are the accessed bits set - before or
4734 * after pushing the stack frame? (Write protect the gdt + stack to
4735 * find out.) */
4736 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4737 {
4738 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4739 if (rcStrict != VINF_SUCCESS)
4740 return rcStrict;
4741 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4742 }
4743
4744 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4745 {
4746 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4747 if (rcStrict != VINF_SUCCESS)
4748 return rcStrict;
4749 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4750 }
4751
4752 /*
4753         * Start committing the register changes (joins with the DPL=CPL branch).
4754 */
4755 pCtx->ss.Sel = NewSS;
4756 pCtx->ss.ValidSel = NewSS;
4757 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4758 pCtx->ss.u32Limit = cbLimitSS;
4759 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4760 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4761 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4762 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4763 * SP is loaded).
4764 * Need to check the other combinations too:
4765 * - 16-bit TSS, 32-bit handler
4766 * - 32-bit TSS, 16-bit handler */
4767 if (!pCtx->ss.Attr.n.u1DefBig)
4768 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4769 else
4770 pCtx->rsp = uNewEsp - cbStackFrame;
4771
4772 if (fEfl & X86_EFL_VM)
4773 {
4774 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4775 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4776 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4777 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4778 }
4779 }
4780 /*
4781 * Same privilege, no stack change and smaller stack frame.
4782 */
4783 else
4784 {
4785 uint64_t uNewRsp;
4786 RTPTRUNION uStackFrame;
4787 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4788 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4789 if (rcStrict != VINF_SUCCESS)
4790 return rcStrict;
4791 void * const pvStackFrame = uStackFrame.pv;
4792
4793 if (f32BitGate)
4794 {
4795 if (fFlags & IEM_XCPT_FLAGS_ERR)
4796 *uStackFrame.pu32++ = uErr;
4797 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4798 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4799 uStackFrame.pu32[2] = fEfl;
4800 }
4801 else
4802 {
4803 if (fFlags & IEM_XCPT_FLAGS_ERR)
4804 *uStackFrame.pu16++ = uErr;
4805 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4806 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4807 uStackFrame.pu16[2] = fEfl;
4808 }
4809 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4810 if (rcStrict != VINF_SUCCESS)
4811 return rcStrict;
4812
4813 /* Mark the CS selector as 'accessed'. */
4814 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4815 {
4816 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4817 if (rcStrict != VINF_SUCCESS)
4818 return rcStrict;
4819 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4820 }
4821
4822 /*
4823 * Start committing the register changes (joins with the other branch).
4824 */
4825 pCtx->rsp = uNewRsp;
4826 }
4827
4828 /* ... register committing continues. */
4829 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4830 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4831 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4832 pCtx->cs.u32Limit = cbLimitCS;
4833 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4834 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4835
4836 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4837 fEfl &= ~fEflToClear;
4838 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4839
4840 if (fFlags & IEM_XCPT_FLAGS_CR2)
4841 pCtx->cr2 = uCr2;
4842
4843 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4844 iemRaiseXcptAdjustState(pCtx, u8Vector);
4845
4846 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4847}
4848
4849
4850/**
4851 * Implements exceptions and interrupts for long mode.
4852 *
4853 * @returns VBox strict status code.
4854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4855 * @param pCtx The CPU context.
4856 * @param cbInstr The number of bytes to offset rIP by in the return
4857 * address.
4858 * @param u8Vector The interrupt / exception vector number.
4859 * @param fFlags The flags.
4860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4862 */
4863IEM_STATIC VBOXSTRICTRC
4864iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4865 PCPUMCTX pCtx,
4866 uint8_t cbInstr,
4867 uint8_t u8Vector,
4868 uint32_t fFlags,
4869 uint16_t uErr,
4870 uint64_t uCr2)
4871{
4872 /*
4873 * Read the IDT entry.
4874 */
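    /* Each IDT gate descriptor is 16 bytes wide in long mode, hence the shift
       by 4 below and the two 8-byte reads. */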
4875 uint16_t offIdt = (uint16_t)u8Vector << 4;
4876 if (pCtx->idtr.cbIdt < offIdt + 7)
4877 {
4878 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4879 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4880 }
4881 X86DESC64 Idte;
4882 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4883 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4884 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4885 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4886 return rcStrict;
4887 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4888 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4889 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4890
4891 /*
4892 * Check the descriptor type, DPL and such.
4893 * ASSUMES this is done in the same order as described for call-gate calls.
4894 */
4895 if (Idte.Gate.u1DescType)
4896 {
4897 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4898 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4899 }
4900 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4901 switch (Idte.Gate.u4Type)
4902 {
4903 case AMD64_SEL_TYPE_SYS_INT_GATE:
4904 fEflToClear |= X86_EFL_IF;
4905 break;
4906 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4907 break;
4908
4909 default:
4910 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4911 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4912 }
4913
4914 /* Check DPL against CPL if applicable. */
4915 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4916 {
4917 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4918 {
4919 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4920 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4921 }
4922 }
4923
4924 /* Is it there? */
4925 if (!Idte.Gate.u1Present)
4926 {
4927 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4928 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4929 }
4930
4931 /* A null CS is bad. */
4932 RTSEL NewCS = Idte.Gate.u16Sel;
4933 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4934 {
4935 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4936 return iemRaiseGeneralProtectionFault0(pVCpu);
4937 }
4938
4939 /* Fetch the descriptor for the new CS. */
4940 IEMSELDESC DescCS;
4941 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4942 if (rcStrict != VINF_SUCCESS)
4943 {
4944 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4945 return rcStrict;
4946 }
4947
4948 /* Must be a 64-bit code segment. */
4949 if (!DescCS.Long.Gen.u1DescType)
4950 {
4951 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4952 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4953 }
4954 if ( !DescCS.Long.Gen.u1Long
4955 || DescCS.Long.Gen.u1DefBig
4956 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4957 {
4958 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4959 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4960 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4961 }
4962
4963 /* Don't allow lowering the privilege level. For non-conforming CS
4964 selectors, the CS.DPL sets the privilege level the trap/interrupt
4965 handler runs at. For conforming CS selectors, the CPL remains
4966 unchanged, but the CS.DPL must be <= CPL. */
4967 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4968 * when CPU in Ring-0. Result \#GP? */
4969 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4970 {
4971 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4972 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4973 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4974 }
4975
4976
4977 /* Make sure the selector is present. */
4978 if (!DescCS.Legacy.Gen.u1Present)
4979 {
4980 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4981 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4982 }
4983
4984 /* Check that the new RIP is canonical. */
4985 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4986 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4987 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4988 if (!IEM_IS_CANONICAL(uNewRip))
4989 {
4990 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4991 return iemRaiseGeneralProtectionFault0(pVCpu);
4992 }
4993
4994 /*
4995 * If the privilege level changes or if the IST isn't zero, we need to get
4996 * a new stack from the TSS.
4997 */
4998 uint64_t uNewRsp;
4999 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5000 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5001 if ( uNewCpl != pVCpu->iem.s.uCpl
5002 || Idte.Gate.u3IST != 0)
5003 {
5004 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5005 if (rcStrict != VINF_SUCCESS)
5006 return rcStrict;
5007 }
5008 else
5009 uNewRsp = pCtx->rsp;
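    /* In 64-bit mode the CPU aligns the new stack pointer on a 16-byte boundary
       before pushing the frame, hence the masking below. */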
5010 uNewRsp &= ~(uint64_t)0xf;
5011
5012 /*
5013 * Calc the flag image to push.
5014 */
5015 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5016 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5017 fEfl &= ~X86_EFL_RF;
5018 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5019 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5020
5021 /*
5022 * Start making changes.
5023 */
5024 /* Set the new CPL so that stack accesses use it. */
5025 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5026 pVCpu->iem.s.uCpl = uNewCpl;
5027
5028 /* Create the stack frame. */
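    /* The 64-bit frame is always RIP, CS, RFLAGS, RSP and SS (5 qwords), plus an
       optional error code qword, i.e. 40 or 48 bytes. */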
5029 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5030 RTPTRUNION uStackFrame;
5031 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5032 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035 void * const pvStackFrame = uStackFrame.pv;
5036
5037 if (fFlags & IEM_XCPT_FLAGS_ERR)
5038 *uStackFrame.pu64++ = uErr;
5039 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5040 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5041 uStackFrame.pu64[2] = fEfl;
5042 uStackFrame.pu64[3] = pCtx->rsp;
5043 uStackFrame.pu64[4] = pCtx->ss.Sel;
5044 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5045 if (rcStrict != VINF_SUCCESS)
5046 return rcStrict;
5047
5048    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5049    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5050 * after pushing the stack frame? (Write protect the gdt + stack to
5051 * find out.) */
5052 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5053 {
5054 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5055 if (rcStrict != VINF_SUCCESS)
5056 return rcStrict;
5057 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5058 }
5059
5060 /*
5061     * Start committing the register changes.
5062 */
5063 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5064 * hidden registers when interrupting 32-bit or 16-bit code! */
5065 if (uNewCpl != uOldCpl)
5066 {
5067 pCtx->ss.Sel = 0 | uNewCpl;
5068 pCtx->ss.ValidSel = 0 | uNewCpl;
5069 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5070 pCtx->ss.u32Limit = UINT32_MAX;
5071 pCtx->ss.u64Base = 0;
5072 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5073 }
5074 pCtx->rsp = uNewRsp - cbStackFrame;
5075 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5076 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5077 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5078 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5079 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5080 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5081 pCtx->rip = uNewRip;
5082
5083 fEfl &= ~fEflToClear;
5084 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5085
5086 if (fFlags & IEM_XCPT_FLAGS_CR2)
5087 pCtx->cr2 = uCr2;
5088
5089 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5090 iemRaiseXcptAdjustState(pCtx, u8Vector);
5091
5092 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5093}
5094
5095
5096/**
5097 * Implements exceptions and interrupts.
5098 *
5099 * All exceptions and interrupts go through this function!
5100 *
5101 * @returns VBox strict status code.
5102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5103 * @param cbInstr The number of bytes to offset rIP by in the return
5104 * address.
5105 * @param u8Vector The interrupt / exception vector number.
5106 * @param fFlags The flags.
5107 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5108 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5109 */
5110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5111iemRaiseXcptOrInt(PVMCPU pVCpu,
5112 uint8_t cbInstr,
5113 uint8_t u8Vector,
5114 uint32_t fFlags,
5115 uint16_t uErr,
5116 uint64_t uCr2)
5117{
5118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5119#ifdef IN_RING0
5120 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5121 AssertRCReturn(rc, rc);
5122#endif
5123
5124#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5125 /*
5126 * Flush prefetch buffer
5127 */
5128 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5129#endif
5130
5131 /*
5132 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5133 */
5134 if ( pCtx->eflags.Bits.u1VM
5135 && pCtx->eflags.Bits.u2IOPL != 3
5136 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5137 && (pCtx->cr0 & X86_CR0_PE) )
5138 {
5139 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5140 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5141 u8Vector = X86_XCPT_GP;
5142 uErr = 0;
5143 }
5144#ifdef DBGFTRACE_ENABLED
5145 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5146 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5147 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5148#endif
5149
5150 /*
5151 * Do recursion accounting.
5152 */
5153 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5154 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5155 if (pVCpu->iem.s.cXcptRecursions == 0)
5156 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5157 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5158 else
5159 {
5160 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5161 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5162
5163        /** @todo double and triple faults. */
5164 if (pVCpu->iem.s.cXcptRecursions >= 3)
5165 {
5166#ifdef DEBUG_bird
5167 AssertFailed();
5168#endif
5169 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5170 }
5171
5172 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5173 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5174 {
5175 ....
5176 } */
5177 }
5178 pVCpu->iem.s.cXcptRecursions++;
5179 pVCpu->iem.s.uCurXcpt = u8Vector;
5180 pVCpu->iem.s.fCurXcpt = fFlags;
5181
5182 /*
5183 * Extensive logging.
5184 */
5185#if defined(LOG_ENABLED) && defined(IN_RING3)
5186 if (LogIs3Enabled())
5187 {
5188 PVM pVM = pVCpu->CTX_SUFF(pVM);
5189 char szRegs[4096];
5190 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5191 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5192 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5193 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5194 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5195 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5196 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5197 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5198 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5199 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5200 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5201 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5202 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5203 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5204 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5205 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5206 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5207 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5208 " efer=%016VR{efer}\n"
5209 " pat=%016VR{pat}\n"
5210 " sf_mask=%016VR{sf_mask}\n"
5211 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5212 " lstar=%016VR{lstar}\n"
5213 " star=%016VR{star} cstar=%016VR{cstar}\n"
5214 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5215 );
5216
5217 char szInstr[256];
5218 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5219 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5220 szInstr, sizeof(szInstr), NULL);
5221 Log3(("%s%s\n", szRegs, szInstr));
5222 }
5223#endif /* LOG_ENABLED */
5224
5225 /*
5226 * Call the mode specific worker function.
5227 */
5228 VBOXSTRICTRC rcStrict;
5229 if (!(pCtx->cr0 & X86_CR0_PE))
5230 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5231 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5232 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5233 else
5234 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5235
5236 /* Flush the prefetch buffer. */
5237#ifdef IEM_WITH_CODE_TLB
5238 pVCpu->iem.s.pbInstrBuf = NULL;
5239#else
5240 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5241#endif
5242
5243 /*
5244 * Unwind.
5245 */
5246 pVCpu->iem.s.cXcptRecursions--;
5247 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5248 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5249 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5250 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5251 return rcStrict;
5252}
5253
5254#ifdef IEM_WITH_SETJMP
5255/**
5256 * See iemRaiseXcptOrInt. Will not return.
5257 */
5258IEM_STATIC DECL_NO_RETURN(void)
5259iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5260 uint8_t cbInstr,
5261 uint8_t u8Vector,
5262 uint32_t fFlags,
5263 uint16_t uErr,
5264 uint64_t uCr2)
5265{
5266 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5268}
5269#endif
5270
5271
5272/** \#DE - 00. */
5273DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5274{
5275 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5276}
5277
5278
5279/** \#DB - 01.
5280 * @note This automatically clears DR7.GD. */
5281DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5282{
5283 /** @todo set/clear RF. */
5284 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5286}
5287
5288
5289/** \#UD - 06. */
5290DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5291{
5292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5293}
5294
5295
5296/** \#NM - 07. */
5297DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5298{
5299 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5300}
5301
5302
5303/** \#TS(err) - 0a. */
5304DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5305{
5306 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5307}
5308
5309
5310/** \#TS(tr) - 0a. */
5311DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5312{
5313 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5314 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5315}
5316
5317
5318/** \#TS(0) - 0a. */
5319DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5320{
5321 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5322 0, 0);
5323}
5324
5325
5326/** \#TS(sel) - 0a. */
5327DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5328{
5329 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5330 uSel & X86_SEL_MASK_OFF_RPL, 0);
5331}
5332
5333
5334/** \#NP(err) - 0b. */
5335DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5336{
5337 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5338}
5339
5340
5341/** \#NP(sel) - 0b. */
5342DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5343{
5344 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5345 uSel & ~X86_SEL_RPL, 0);
5346}
5347
5348
5349/** \#SS(seg) - 0c. */
5350DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5351{
5352 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5353 uSel & ~X86_SEL_RPL, 0);
5354}
5355
5356
5357/** \#SS(err) - 0c. */
5358DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5359{
5360 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5361}
5362
5363
5364/** \#GP(n) - 0d. */
5365DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5366{
5367 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5368}
5369
5370
5371/** \#GP(0) - 0d. */
5372DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5373{
5374 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5375}
5376
5377#ifdef IEM_WITH_SETJMP
5378/** \#GP(0) - 0d. */
5379DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5380{
5381 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5382}
5383#endif
5384
5385
5386/** \#GP(sel) - 0d. */
5387DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5388{
5389 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5390 Sel & ~X86_SEL_RPL, 0);
5391}
5392
5393
5394/** \#GP(0) - 0d. */
5395DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5396{
5397 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5398}
5399
5400
5401/** \#GP(sel) - 0d. */
5402DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5403{
5404 NOREF(iSegReg); NOREF(fAccess);
5405 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5406 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5407}
5408
5409#ifdef IEM_WITH_SETJMP
5410/** \#GP(sel) - 0d, longjmp. */
5411DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5412{
5413 NOREF(iSegReg); NOREF(fAccess);
5414 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5415 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5416}
5417#endif
5418
5419/** \#GP(sel) - 0d. */
5420DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5421{
5422 NOREF(Sel);
5423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5424}
5425
5426#ifdef IEM_WITH_SETJMP
5427/** \#GP(sel) - 0d, longjmp. */
5428DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5429{
5430 NOREF(Sel);
5431 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5432}
5433#endif
5434
5435
5436/** \#GP(sel) - 0d. */
5437DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5438{
5439 NOREF(iSegReg); NOREF(fAccess);
5440 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5441}
5442
5443#ifdef IEM_WITH_SETJMP
5444/** \#GP(sel) - 0d, longjmp. */
5445DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5446 uint32_t fAccess)
5447{
5448 NOREF(iSegReg); NOREF(fAccess);
5449 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5450}
5451#endif
5452
5453
5454/** \#PF(n) - 0e. */
5455DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5456{
5457 uint16_t uErr;
5458 switch (rc)
5459 {
5460 case VERR_PAGE_NOT_PRESENT:
5461 case VERR_PAGE_TABLE_NOT_PRESENT:
5462 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5463 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5464 uErr = 0;
5465 break;
5466
5467 default:
5468 AssertMsgFailed(("%Rrc\n", rc));
5469 /* fall thru */
5470 case VERR_ACCESS_DENIED:
5471 uErr = X86_TRAP_PF_P;
5472 break;
5473
5474 /** @todo reserved */
5475 }
5476
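    /* Assemble the architectural #PF error code: P (bit 0) = protection violation
       vs. not-present, W/R (bit 1) = write access, U/S (bit 2) = user-mode access,
       I/D (bit 4) = instruction fetch (only reported here when PAE paging and
       EFER.NXE are enabled, matching the check below). */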
5477 if (pVCpu->iem.s.uCpl == 3)
5478 uErr |= X86_TRAP_PF_US;
5479
5480 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5481 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5482 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5483 uErr |= X86_TRAP_PF_ID;
5484
5485#if 0 /* This is so much nonsense, really. Why was it done like that? */
5486 /* Note! RW access callers reporting a WRITE protection fault, will clear
5487 the READ flag before calling. So, read-modify-write accesses (RW)
5488 can safely be reported as READ faults. */
5489 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5490 uErr |= X86_TRAP_PF_RW;
5491#else
5492 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5493 {
5494 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5495 uErr |= X86_TRAP_PF_RW;
5496 }
5497#endif
5498
5499 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5500 uErr, GCPtrWhere);
5501}
5502
5503#ifdef IEM_WITH_SETJMP
5504/** \#PF(n) - 0e, longjmp. */
5505IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5506{
5507 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5508}
5509#endif
5510
5511
5512/** \#MF(0) - 10. */
5513DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5514{
5515 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5516}
5517
5518
5519/** \#AC(0) - 11. */
5520DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5521{
5522 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5523}
5524
5525
5526/**
5527 * Macro for calling iemCImplRaiseDivideError().
5528 *
5529 * This enables us to add/remove arguments and force different levels of
5530 * inlining as we wish.
5531 *
5532 * @return Strict VBox status code.
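 *
 * Illustrative use from an opcode decoder body (a typical pattern, not copied
 * verbatim from the instruction tables):
 * @code
 *      return IEMOP_RAISE_DIVIDE_ERROR();
 * @endcode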
5533 */
5534#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5535IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5536{
5537 NOREF(cbInstr);
5538 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5539}
5540
5541
5542/**
5543 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5544 *
5545 * This enables us to add/remove arguments and force different levels of
5546 * inlining as we wish.
5547 *
5548 * @return Strict VBox status code.
5549 */
5550#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5551IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5552{
5553 NOREF(cbInstr);
5554 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5555}
5556
5557
5558/**
5559 * Macro for calling iemCImplRaiseInvalidOpcode().
5560 *
5561 * This enables us to add/remove arguments and force different levels of
5562 * inlining as we wish.
5563 *
5564 * @return Strict VBox status code.
5565 */
5566#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5567IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5568{
5569 NOREF(cbInstr);
5570 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5571}
5572
5573
5574/** @} */
5575
5576
5577/*
5578 *
5579 * Helper routines.
5580 * Helper routines.
5581 * Helper routines.
5582 *
5583 */
5584
5585/**
5586 * Recalculates the effective operand size.
5587 *
5588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
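 *
 * @remarks In 16-bit mode an operand size prefix (0x66) selects 32-bit operands
 *          and in 32-bit mode it selects 16-bit operands. In 64-bit mode REX.W
 *          forces 64-bit operands and takes precedence over 0x66, 0x66 alone
 *          selects 16-bit operands, and otherwise the default operand size
 *          applies.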
5589 */
5590IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5591{
5592 switch (pVCpu->iem.s.enmCpuMode)
5593 {
5594 case IEMMODE_16BIT:
5595 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5596 break;
5597 case IEMMODE_32BIT:
5598 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5599 break;
5600 case IEMMODE_64BIT:
5601 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5602 {
5603 case 0:
5604 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5605 break;
5606 case IEM_OP_PRF_SIZE_OP:
5607 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5608 break;
5609 case IEM_OP_PRF_SIZE_REX_W:
5610 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5611 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5612 break;
5613 }
5614 break;
5615 default:
5616 AssertFailed();
5617 }
5618}
5619
5620
5621/**
5622 * Sets the default operand size to 64-bit and recalculates the effective
5623 * operand size.
5624 *
5625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5626 */
5627IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5628{
5629 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5630 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5631 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5632 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5633 else
5634 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5635}
5636
5637
5638/*
5639 *
5640 * Common opcode decoders.
5641 * Common opcode decoders.
5642 * Common opcode decoders.
5643 *
5644 */
5645//#include <iprt/mem.h>
5646
5647/**
5648 * Used to add extra details about a stub case.
5649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5650 */
5651IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5652{
5653#if defined(LOG_ENABLED) && defined(IN_RING3)
5654 PVM pVM = pVCpu->CTX_SUFF(pVM);
5655 char szRegs[4096];
5656 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5657 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5658 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5659 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5660 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5661 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5662 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5663 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5664 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5665 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5666 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5667 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5668 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5669 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5670 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5671 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5672 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5673 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5674 " efer=%016VR{efer}\n"
5675 " pat=%016VR{pat}\n"
5676 " sf_mask=%016VR{sf_mask}\n"
5677 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5678 " lstar=%016VR{lstar}\n"
5679 " star=%016VR{star} cstar=%016VR{cstar}\n"
5680 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5681 );
5682
5683 char szInstr[256];
5684 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5685 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5686 szInstr, sizeof(szInstr), NULL);
5687
5688 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5689#else
5690    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5691#endif
5692}
5693
5694/**
5695 * Complains about a stub.
5696 *
5697 * Providing two versions of this macro, one for daily use and one for use when
5698 * working on IEM.
5699 */
5700#if 0
5701# define IEMOP_BITCH_ABOUT_STUB() \
5702 do { \
5703 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5704 iemOpStubMsg2(pVCpu); \
5705 RTAssertPanic(); \
5706 } while (0)
5707#else
5708# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5709#endif
5710
5711/** Stubs an opcode. */
5712#define FNIEMOP_STUB(a_Name) \
5713 FNIEMOP_DEF(a_Name) \
5714 { \
5715 RT_NOREF_PV(pVCpu); \
5716 IEMOP_BITCH_ABOUT_STUB(); \
5717 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5718 } \
5719 typedef int ignore_semicolon
5720
5721/** Stubs an opcode. */
5722#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5723 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5724 { \
5725 RT_NOREF_PV(pVCpu); \
5726 RT_NOREF_PV(a_Name0); \
5727 IEMOP_BITCH_ABOUT_STUB(); \
5728 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5729 } \
5730 typedef int ignore_semicolon
5731
5732/** Stubs an opcode which currently should raise \#UD. */
5733#define FNIEMOP_UD_STUB(a_Name) \
5734 FNIEMOP_DEF(a_Name) \
5735 { \
5736 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5737 return IEMOP_RAISE_INVALID_OPCODE(); \
5738 } \
5739 typedef int ignore_semicolon
5740
5741/** Stubs an opcode which currently should raise \#UD. */
5742#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5743 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5744 { \
5745 RT_NOREF_PV(pVCpu); \
5746 RT_NOREF_PV(a_Name0); \
5747 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5748 return IEMOP_RAISE_INVALID_OPCODE(); \
5749 } \
5750 typedef int ignore_semicolon
5751
5752
5753
5754/** @name Register Access.
5755 * @{
5756 */
5757
5758/**
5759 * Gets a reference (pointer) to the specified hidden segment register.
5760 *
5761 * @returns Hidden register reference.
5762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5763 * @param iSegReg The segment register.
5764 */
5765IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5766{
5767 Assert(iSegReg < X86_SREG_COUNT);
5768 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5769 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5770
5771#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5772 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5773 { /* likely */ }
5774 else
5775 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5776#else
5777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5778#endif
5779 return pSReg;
5780}
5781
5782
5783/**
5784 * Ensures that the given hidden segment register is up to date.
5785 *
5786 * @returns Hidden register reference.
5787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5788 * @param pSReg The segment register.
5789 */
5790IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5791{
5792#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5793 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5794 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5795#else
5796 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5797 NOREF(pVCpu);
5798#endif
5799 return pSReg;
5800}
5801
5802
5803/**
5804 * Gets a reference (pointer) to the specified segment register (the selector
5805 * value).
5806 *
5807 * @returns Pointer to the selector variable.
5808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5809 * @param iSegReg The segment register.
5810 */
5811DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5812{
5813 Assert(iSegReg < X86_SREG_COUNT);
5814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5815 return &pCtx->aSRegs[iSegReg].Sel;
5816}
5817
5818
5819/**
5820 * Fetches the selector value of a segment register.
5821 *
5822 * @returns The selector value.
5823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5824 * @param iSegReg The segment register.
5825 */
5826DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5827{
5828 Assert(iSegReg < X86_SREG_COUNT);
5829 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5830}
5831
5832
5833/**
5834 * Gets a reference (pointer) to the specified general purpose register.
5835 *
5836 * @returns Register reference.
5837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5838 * @param iReg The general purpose register.
5839 */
5840DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5841{
5842 Assert(iReg < 16);
5843 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5844 return &pCtx->aGRegs[iReg];
5845}
5846
5847
5848/**
5849 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5850 *
5851 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5852 *
5853 * @returns Register reference.
5854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5855 * @param iReg The register.
5856 */
5857DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5858{
5859 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
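    /* Without a REX prefix, byte register encodings 4 thru 7 select the legacy
       high byte registers (AH, CH, DH, BH); with any REX prefix they select
       SPL, BPL, SIL and DIL instead. */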
5860 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5861 {
5862 Assert(iReg < 16);
5863 return &pCtx->aGRegs[iReg].u8;
5864 }
5865 /* high 8-bit register. */
5866 Assert(iReg < 8);
5867 return &pCtx->aGRegs[iReg & 3].bHi;
5868}
5869
5870
5871/**
5872 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5873 *
5874 * @returns Register reference.
5875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5876 * @param iReg The register.
5877 */
5878DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5879{
5880 Assert(iReg < 16);
5881 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5882 return &pCtx->aGRegs[iReg].u16;
5883}
5884
5885
5886/**
5887 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5888 *
5889 * @returns Register reference.
5890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5891 * @param iReg The register.
5892 */
5893DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5894{
5895 Assert(iReg < 16);
5896 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5897 return &pCtx->aGRegs[iReg].u32;
5898}
5899
5900
5901/**
5902 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5903 *
5904 * @returns Register reference.
5905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5906 * @param iReg The register.
5907 */
5908DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5909{
5910    Assert(iReg < 16);
5911 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5912 return &pCtx->aGRegs[iReg].u64;
5913}
5914
5915
5916/**
5917 * Fetches the value of an 8-bit general purpose register.
5918 *
5919 * @returns The register value.
5920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5921 * @param iReg The register.
5922 */
5923DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5924{
5925 return *iemGRegRefU8(pVCpu, iReg);
5926}
5927
5928
5929/**
5930 * Fetches the value of a 16-bit general purpose register.
5931 *
5932 * @returns The register value.
5933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5934 * @param iReg The register.
5935 */
5936DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5937{
5938 Assert(iReg < 16);
5939 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5940}
5941
5942
5943/**
5944 * Fetches the value of a 32-bit general purpose register.
5945 *
5946 * @returns The register value.
5947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5948 * @param iReg The register.
5949 */
5950DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5951{
5952 Assert(iReg < 16);
5953 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5954}
5955
5956
5957/**
5958 * Fetches the value of a 64-bit general purpose register.
5959 *
5960 * @returns The register value.
5961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5962 * @param iReg The register.
5963 */
5964DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5965{
5966 Assert(iReg < 16);
5967 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5968}
5969
5970
5971/**
5972 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5973 *
5974 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5975 * segment limit.
5976 *
5977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5978 * @param offNextInstr The offset of the next instruction.
5979 */
5980IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5981{
5982 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5983 switch (pVCpu->iem.s.enmEffOpSize)
5984 {
5985 case IEMMODE_16BIT:
5986 {
5987 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5988 if ( uNewIp > pCtx->cs.u32Limit
5989 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5990 return iemRaiseGeneralProtectionFault0(pVCpu);
5991 pCtx->rip = uNewIp;
5992 break;
5993 }
5994
5995 case IEMMODE_32BIT:
5996 {
5997 Assert(pCtx->rip <= UINT32_MAX);
5998 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5999
6000 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6001 if (uNewEip > pCtx->cs.u32Limit)
6002 return iemRaiseGeneralProtectionFault0(pVCpu);
6003 pCtx->rip = uNewEip;
6004 break;
6005 }
6006
6007 case IEMMODE_64BIT:
6008 {
6009 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6010
6011 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6012 if (!IEM_IS_CANONICAL(uNewRip))
6013 return iemRaiseGeneralProtectionFault0(pVCpu);
6014 pCtx->rip = uNewRip;
6015 break;
6016 }
6017
6018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6019 }
6020
6021 pCtx->eflags.Bits.u1RF = 0;
6022
6023#ifndef IEM_WITH_CODE_TLB
6024 /* Flush the prefetch buffer. */
6025 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6026#endif
6027
6028 return VINF_SUCCESS;
6029}
6030
6031
6032/**
6033 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6034 *
6035 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6036 * segment limit.
6037 *
6038 * @returns Strict VBox status code.
6039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6040 * @param offNextInstr The offset of the next instruction.
6041 */
6042IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6043{
6044 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6045 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6046
6047 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6048 if ( uNewIp > pCtx->cs.u32Limit
6049 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6050 return iemRaiseGeneralProtectionFault0(pVCpu);
6051 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6052 pCtx->rip = uNewIp;
6053 pCtx->eflags.Bits.u1RF = 0;
6054
6055#ifndef IEM_WITH_CODE_TLB
6056 /* Flush the prefetch buffer. */
6057 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6058#endif
6059
6060 return VINF_SUCCESS;
6061}
6062
6063
6064/**
6065 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6066 *
6067 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6068 * segment limit.
6069 *
6070 * @returns Strict VBox status code.
6071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6072 * @param offNextInstr The offset of the next instruction.
6073 */
6074IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6075{
6076 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6077 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6078
6079 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6080 {
6081 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6082
6083 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6084 if (uNewEip > pCtx->cs.u32Limit)
6085 return iemRaiseGeneralProtectionFault0(pVCpu);
6086 pCtx->rip = uNewEip;
6087 }
6088 else
6089 {
6090 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6091
6092 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6093 if (!IEM_IS_CANONICAL(uNewRip))
6094 return iemRaiseGeneralProtectionFault0(pVCpu);
6095 pCtx->rip = uNewRip;
6096 }
6097 pCtx->eflags.Bits.u1RF = 0;
6098
6099#ifndef IEM_WITH_CODE_TLB
6100 /* Flush the prefetch buffer. */
6101 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6102#endif
6103
6104 return VINF_SUCCESS;
6105}
6106
6107
6108/**
6109 * Performs a near jump to the specified address.
6110 *
6111 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6112 * segment limit.
6113 *
6114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6115 * @param uNewRip The new RIP value.
6116 */
6117IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6118{
6119 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6120 switch (pVCpu->iem.s.enmEffOpSize)
6121 {
6122 case IEMMODE_16BIT:
6123 {
6124 Assert(uNewRip <= UINT16_MAX);
6125 if ( uNewRip > pCtx->cs.u32Limit
6126 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6127 return iemRaiseGeneralProtectionFault0(pVCpu);
6128 /** @todo Test 16-bit jump in 64-bit mode. */
6129 pCtx->rip = uNewRip;
6130 break;
6131 }
6132
6133 case IEMMODE_32BIT:
6134 {
6135 Assert(uNewRip <= UINT32_MAX);
6136 Assert(pCtx->rip <= UINT32_MAX);
6137 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6138
6139 if (uNewRip > pCtx->cs.u32Limit)
6140 return iemRaiseGeneralProtectionFault0(pVCpu);
6141 pCtx->rip = uNewRip;
6142 break;
6143 }
6144
6145 case IEMMODE_64BIT:
6146 {
6147 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6148
6149 if (!IEM_IS_CANONICAL(uNewRip))
6150 return iemRaiseGeneralProtectionFault0(pVCpu);
6151 pCtx->rip = uNewRip;
6152 break;
6153 }
6154
6155 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6156 }
6157
6158 pCtx->eflags.Bits.u1RF = 0;
6159
6160#ifndef IEM_WITH_CODE_TLB
6161 /* Flush the prefetch buffer. */
6162 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6163#endif
6164
6165 return VINF_SUCCESS;
6166}
6167
6168
6169/**
6170 * Get the address of the top of the stack.
6171 *
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param pCtx The CPU context which SP/ESP/RSP should be
6174 * read.
6175 */
6176DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6177{
6178 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6179 return pCtx->rsp;
6180 if (pCtx->ss.Attr.n.u1DefBig)
6181 return pCtx->esp;
6182 return pCtx->sp;
6183}
6184
6185
6186/**
6187 * Updates the RIP/EIP/IP to point to the next instruction.
6188 *
6189 * This function leaves the EFLAGS.RF flag alone.
6190 *
6191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6192 * @param cbInstr The number of bytes to add.
6193 */
6194IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6195{
6196 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6197 switch (pVCpu->iem.s.enmCpuMode)
6198 {
6199 case IEMMODE_16BIT:
6200 Assert(pCtx->rip <= UINT16_MAX);
6201 pCtx->eip += cbInstr;
6202 pCtx->eip &= UINT32_C(0xffff);
6203 break;
6204
6205 case IEMMODE_32BIT:
6206 pCtx->eip += cbInstr;
6207 Assert(pCtx->rip <= UINT32_MAX);
6208 break;
6209
6210 case IEMMODE_64BIT:
6211 pCtx->rip += cbInstr;
6212 break;
6213 default: AssertFailed();
6214 }
6215}
6216
6217
6218#if 0
6219/**
6220 * Updates the RIP/EIP/IP to point to the next instruction.
6221 *
6222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6223 */
6224IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6225{
6226 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6227}
6228#endif
6229
6230
6231
6232/**
6233 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6234 *
6235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6236 * @param cbInstr The number of bytes to add.
6237 */
6238IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6239{
6240 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6241
6242 pCtx->eflags.Bits.u1RF = 0;
6243
6244 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6245#if ARCH_BITS >= 64
6246 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6247 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6248 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6249#else
6250 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6251 pCtx->rip += cbInstr;
6252 else
6253 {
6254 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6255 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6256 }
6257#endif
6258}
6259
6260
6261/**
6262 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6263 *
6264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6265 */
6266IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6267{
6268 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6269}
6270
6271
6272/**
6273 * Adds to the stack pointer.
6274 *
6275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6276 * @param pCtx The CPU context which SP/ESP/RSP should be
6277 * updated.
6278 * @param cbToAdd The number of bytes to add (8-bit!).
6279 */
6280DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6281{
6282 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6283 pCtx->rsp += cbToAdd;
6284 else if (pCtx->ss.Attr.n.u1DefBig)
6285 pCtx->esp += cbToAdd;
6286 else
6287 pCtx->sp += cbToAdd;
6288}
6289
6290
6291/**
6292 * Subtracts from the stack pointer.
6293 *
6294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6295 * @param pCtx The CPU context whose SP/ESP/RSP should be
6296 * updated.
6297 * @param cbToSub The number of bytes to subtract (8-bit!).
6298 */
6299DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6300{
6301 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6302 pCtx->rsp -= cbToSub;
6303 else if (pCtx->ss.Attr.n.u1DefBig)
6304 pCtx->esp -= cbToSub;
6305 else
6306 pCtx->sp -= cbToSub;
6307}
6308
6309
6310/**
6311 * Adds to the temporary stack pointer.
6312 *
6313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6314 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6315 * @param cbToAdd The number of bytes to add (16-bit).
6316 * @param pCtx Where to get the current stack mode.
6317 */
6318DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6319{
6320 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6321 pTmpRsp->u += cbToAdd;
6322 else if (pCtx->ss.Attr.n.u1DefBig)
6323 pTmpRsp->DWords.dw0 += cbToAdd;
6324 else
6325 pTmpRsp->Words.w0 += cbToAdd;
6326}
6327
6328
6329/**
6330 * Subtracts from the temporary stack pointer.
6331 *
6332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6333 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6334 * @param cbToSub The number of bytes to subtract.
6335 * @param pCtx Where to get the current stack mode.
6336 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6337 * expecting that.
6338 */
6339DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6340{
6341 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6342 pTmpRsp->u -= cbToSub;
6343 else if (pCtx->ss.Attr.n.u1DefBig)
6344 pTmpRsp->DWords.dw0 -= cbToSub;
6345 else
6346 pTmpRsp->Words.w0 -= cbToSub;
6347}
6348
6349
6350/**
6351 * Calculates the effective stack address for a push of the specified size as
6352 * well as the new RSP value (upper bits may be masked).
6353 *
6354 * @returns Effective stack address for the push.
6355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6356 * @param pCtx Where to get the current stack mode.
6357 * @param cbItem The size of the stack item to push.
6358 * @param puNewRsp Where to return the new RSP value.
6359 */
6360DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6361{
6362 RTUINT64U uTmpRsp;
6363 RTGCPTR GCPtrTop;
6364 uTmpRsp.u = pCtx->rsp;
6365
6366 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6367 GCPtrTop = uTmpRsp.u -= cbItem;
6368 else if (pCtx->ss.Attr.n.u1DefBig)
6369 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6370 else
6371 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6372 *puNewRsp = uTmpRsp.u;
6373 return GCPtrTop;
6374}
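
/*
 * Usage sketch (illustrative only; hypothetical caller, not actual IEM code):
 * @code
 *      uint64_t uNewRsp;
 *      RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(uint32_t), &uNewRsp);
 *      // ... map GCPtrTop, store the dword, then commit uNewRsp to pCtx->rsp ...
 * @endcode
 * With a 16-bit stack (SS.D=0) and SP=0x0002, pushing 4 bytes wraps the low
 * word, so GCPtrTop is 0xFFFE while the upper bits of uNewRsp keep whatever
 * RSP already held.
 */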
6375
6376
6377/**
6378 * Gets the current stack pointer and calculates the value after a pop of the
6379 * specified size.
6380 *
6381 * @returns Current stack pointer.
6382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6383 * @param pCtx Where to get the current stack mode.
6384 * @param cbItem The size of the stack item to pop.
6385 * @param puNewRsp Where to return the new RSP value.
6386 */
6387DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6388{
6389 RTUINT64U uTmpRsp;
6390 RTGCPTR GCPtrTop;
6391 uTmpRsp.u = pCtx->rsp;
6392
6393 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6394 {
6395 GCPtrTop = uTmpRsp.u;
6396 uTmpRsp.u += cbItem;
6397 }
6398 else if (pCtx->ss.Attr.n.u1DefBig)
6399 {
6400 GCPtrTop = uTmpRsp.DWords.dw0;
6401 uTmpRsp.DWords.dw0 += cbItem;
6402 }
6403 else
6404 {
6405 GCPtrTop = uTmpRsp.Words.w0;
6406 uTmpRsp.Words.w0 += cbItem;
6407 }
6408 *puNewRsp = uTmpRsp.u;
6409 return GCPtrTop;
6410}
6411
6412
6413/**
6414 * Calculates the effective stack address for a push of the specified size as
6415 * well as the new temporary RSP value (upper bits may be masked).
6416 *
6417 * @returns Effective stack address for the push.
6418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6419 * @param pCtx Where to get the current stack mode.
6420 * @param pTmpRsp The temporary stack pointer. This is updated.
6421 * @param cbItem The size of the stack item to push.
6422 */
6423DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6424{
6425 RTGCPTR GCPtrTop;
6426
6427 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6428 GCPtrTop = pTmpRsp->u -= cbItem;
6429 else if (pCtx->ss.Attr.n.u1DefBig)
6430 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6431 else
6432 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6433 return GCPtrTop;
6434}
6435
6436
6437/**
6438 * Gets the effective stack address for a pop of the specified size and
6439 * calculates and updates the temporary RSP.
6440 *
6441 * @returns Current stack pointer.
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 * @param pCtx Where to get the current stack mode.
6444 * @param pTmpRsp The temporary stack pointer. This is updated.
6445 * @param cbItem The size of the stack item to pop.
6446 */
6447DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6448{
6449 RTGCPTR GCPtrTop;
6450 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6451 {
6452 GCPtrTop = pTmpRsp->u;
6453 pTmpRsp->u += cbItem;
6454 }
6455 else if (pCtx->ss.Attr.n.u1DefBig)
6456 {
6457 GCPtrTop = pTmpRsp->DWords.dw0;
6458 pTmpRsp->DWords.dw0 += cbItem;
6459 }
6460 else
6461 {
6462 GCPtrTop = pTmpRsp->Words.w0;
6463 pTmpRsp->Words.w0 += cbItem;
6464 }
6465 return GCPtrTop;
6466}
6467
6468/** @} */
6469
6470
6471/** @name FPU access and helpers.
6472 *
6473 * @{
6474 */
6475
6476
6477/**
6478 * Hook for preparing to use the host FPU.
6479 *
6480 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6481 *
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 */
6484DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6485{
6486#ifdef IN_RING3
6487 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6488#else
6489 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6490#endif
6491}
6492
6493
6494/**
6495 * Hook for preparing to use the host FPU for SSE
6496 *
6497 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6498 *
6499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6500 */
6501DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6502{
6503 iemFpuPrepareUsage(pVCpu);
6504}
6505
6506
6507/**
6508 * Hook for actualizing the guest FPU state before the interpreter reads it.
6509 *
6510 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6511 *
6512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6513 */
6514DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6515{
6516#ifdef IN_RING3
6517 NOREF(pVCpu);
6518#else
6519 CPUMRZFpuStateActualizeForRead(pVCpu);
6520#endif
6521}
6522
6523
6524/**
6525 * Hook for actualizing the guest FPU state before the interpreter changes it.
6526 *
6527 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6528 *
6529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6530 */
6531DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6532{
6533#ifdef IN_RING3
6534 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6535#else
6536 CPUMRZFpuStateActualizeForChange(pVCpu);
6537#endif
6538}
6539
6540
6541/**
6542 * Hook for actualizing the guest XMM0..15 register state for read only.
6543 *
6544 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6545 *
6546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6547 */
6548DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6549{
6550#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6551 NOREF(pVCpu);
6552#else
6553 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6554#endif
6555}
6556
6557
6558/**
6559 * Hook for actualizing the guest XMM0..15 register state for read+write.
6560 *
6561 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6562 *
6563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6564 */
6565DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6566{
6567#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6568 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6569#else
6570 CPUMRZFpuStateActualizeForChange(pVCpu);
6571#endif
6572}
6573
6574
6575/**
6576 * Stores a QNaN value into a FPU register.
6577 *
6578 * @param pReg Pointer to the register.
6579 */
6580DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6581{
6582 pReg->au32[0] = UINT32_C(0x00000000);
6583 pReg->au32[1] = UINT32_C(0xc0000000);
6584 pReg->au16[4] = UINT16_C(0xffff);
6585}
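
/*
 * Illustrative note: the three stores above assemble the 80-bit "real
 * indefinite" QNaN, i.e. sign=1, exponent=0x7fff, mantissa=0xC000000000000000,
 * which is the value the x87 FPU produces for masked invalid-operation
 * responses.
 */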
6586
6587
6588/**
6589 * Updates the FOP, FPU.CS and FPUIP registers.
6590 *
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param pCtx The CPU context.
6593 * @param pFpuCtx The FPU context.
6594 */
6595DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6596{
6597 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6598 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6599 /** @todo x87.CS and FPUIP need to be kept separately. */
6600 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6601 {
6602 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6603 * handled in real mode, based on the fnsave and fnstenv images. */
6604 pFpuCtx->CS = 0;
6605 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6606 }
6607 else
6608 {
6609 pFpuCtx->CS = pCtx->cs.Sel;
6610 pFpuCtx->FPUIP = pCtx->rip;
6611 }
6612}
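
/*
 * Worked example (illustrative, see the testcase todo above): in real mode
 * with CS=0x1234 and EIP=0x0010 the worker stores
 *      FPUIP = 0x0010 | (0x1234 << 4) = 0x00012350
 * and leaves the saved CS at zero, while in protected mode CS and (E)IP are
 * stored directly.
 */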
6613
6614
6615/**
6616 * Updates the x87.DS and FPUDP registers.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param pCtx The CPU context.
6620 * @param pFpuCtx The FPU context.
6621 * @param iEffSeg The effective segment register.
6622 * @param GCPtrEff The effective address relative to @a iEffSeg.
6623 */
6624DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6625{
6626 RTSEL sel;
6627 switch (iEffSeg)
6628 {
6629 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6630 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6631 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6632 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6633 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6634 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6635 default:
6636 AssertMsgFailed(("%d\n", iEffSeg));
6637 sel = pCtx->ds.Sel;
6638 }
6639 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6640 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6641 {
6642 pFpuCtx->DS = 0;
6643 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6644 }
6645 else
6646 {
6647 pFpuCtx->DS = sel;
6648 pFpuCtx->FPUDP = GCPtrEff;
6649 }
6650}
6651
6652
6653/**
6654 * Rotates the stack registers in the push direction.
6655 *
6656 * @param pFpuCtx The FPU context.
6657 * @remarks This is a complete waste of time, but fxsave stores the registers in
6658 * stack order.
6659 */
6660DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6661{
6662 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6663 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6664 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6665 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6666 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6667 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6668 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6669 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6670 pFpuCtx->aRegs[0].r80 = r80Tmp;
6671}
6672
6673
6674/**
6675 * Rotates the stack registers in the pop direction.
6676 *
6677 * @param pFpuCtx The FPU context.
6678 * @remarks This is a complete waste of time, but fxsave stores the registers in
6679 * stack order.
6680 */
6681DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6682{
6683 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6684 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6685 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6686 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6687 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6688 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6689 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6690 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6691 pFpuCtx->aRegs[7].r80 = r80Tmp;
6692}
6693
6694
6695/**
6696 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6697 * exception prevents it.
6698 *
6699 * @param pResult The FPU operation result to push.
6700 * @param pFpuCtx The FPU context.
6701 */
6702IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6703{
6704 /* Update FSW and bail if there are pending exceptions afterwards. */
6705 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6706 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6707 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6708 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6709 {
6710 pFpuCtx->FSW = fFsw;
6711 return;
6712 }
6713
6714 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6715 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6716 {
6717 /* All is fine, push the actual value. */
6718 pFpuCtx->FTW |= RT_BIT(iNewTop);
6719 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6720 }
6721 else if (pFpuCtx->FCW & X86_FCW_IM)
6722 {
6723 /* Masked stack overflow, push QNaN. */
6724 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6725 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6726 }
6727 else
6728 {
6729 /* Raise stack overflow, don't push anything. */
6730 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6731 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6732 return;
6733 }
6734
6735 fFsw &= ~X86_FSW_TOP_MASK;
6736 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6737 pFpuCtx->FSW = fFsw;
6738
6739 iemFpuRotateStackPush(pFpuCtx);
6740}
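
/*
 * Illustrative note: TOP arithmetic is modulo 8, so the "(TOP + 7) & 7" above
 * is TOP - 1.  E.g. a push with TOP=0 makes physical register 7 the new top
 * of the stack and marks it as in use in FTW.
 */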
6741
6742
6743/**
6744 * Stores a result in a FPU register and updates the FSW and FTW.
6745 *
6746 * @param pFpuCtx The FPU context.
6747 * @param pResult The result to store.
6748 * @param iStReg Which FPU register to store it in.
6749 */
6750IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6751{
6752 Assert(iStReg < 8);
6753 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6754 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6755 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6756 pFpuCtx->FTW |= RT_BIT(iReg);
6757 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6758}
6759
6760
6761/**
6762 * Only updates the FPU status word (FSW) with the result of the current
6763 * instruction.
6764 *
6765 * @param pFpuCtx The FPU context.
6766 * @param u16FSW The FSW output of the current instruction.
6767 */
6768IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6769{
6770 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6771 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6772}
6773
6774
6775/**
6776 * Pops one item off the FPU stack if no pending exception prevents it.
6777 *
6778 * @param pFpuCtx The FPU context.
6779 */
6780IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6781{
6782 /* Check pending exceptions. */
6783 uint16_t uFSW = pFpuCtx->FSW;
6784 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6785 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6786 return;
6787
6788 /* TOP--. */
6789 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6790 uFSW &= ~X86_FSW_TOP_MASK;
6791 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6792 pFpuCtx->FSW = uFSW;
6793
6794 /* Mark the previous ST0 as empty. */
6795 iOldTop >>= X86_FSW_TOP_SHIFT;
6796 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6797
6798 /* Rotate the registers. */
6799 iemFpuRotateStackPop(pFpuCtx);
6800}
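
/*
 * Illustrative note: adding 9 in the TOP field and masking is the same as
 * adding 1 modulo 8, i.e. TOP++ (the pop direction); with TOP=7 the new TOP
 * wraps around to 0.
 */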
6801
6802
6803/**
6804 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6805 *
6806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6807 * @param pResult The FPU operation result to push.
6808 */
6809IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6810{
6811 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6812 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6813 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6814 iemFpuMaybePushResult(pResult, pFpuCtx);
6815}
6816
6817
6818/**
6819 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6820 * and sets FPUDP and FPUDS.
6821 *
6822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6823 * @param pResult The FPU operation result to push.
6824 * @param iEffSeg The effective segment register.
6825 * @param GCPtrEff The effective address relative to @a iEffSeg.
6826 */
6827IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6828{
6829 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6830 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6831 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6832 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6833 iemFpuMaybePushResult(pResult, pFpuCtx);
6834}
6835
6836
6837/**
6838 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6839 * unless a pending exception prevents it.
6840 *
6841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6842 * @param pResult The FPU operation result to store and push.
6843 */
6844IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6845{
6846 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6847 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6848 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6849
6850 /* Update FSW and bail if there are pending exceptions afterwards. */
6851 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6852 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6853 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6854 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6855 {
6856 pFpuCtx->FSW = fFsw;
6857 return;
6858 }
6859
6860 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6861 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6862 {
6863 /* All is fine, push the actual value. */
6864 pFpuCtx->FTW |= RT_BIT(iNewTop);
6865 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6866 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6867 }
6868 else if (pFpuCtx->FCW & X86_FCW_IM)
6869 {
6870 /* Masked stack overflow, push QNaN. */
6871 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6872 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6873 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6874 }
6875 else
6876 {
6877 /* Raise stack overflow, don't push anything. */
6878 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6879 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6880 return;
6881 }
6882
6883 fFsw &= ~X86_FSW_TOP_MASK;
6884 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6885 pFpuCtx->FSW = fFsw;
6886
6887 iemFpuRotateStackPush(pFpuCtx);
6888}
6889
6890
6891/**
6892 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6893 * FOP.
6894 *
6895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6896 * @param pResult The result to store.
6897 * @param iStReg Which FPU register to store it in.
6898 */
6899IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6900{
6901 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6902 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6903 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6904 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6905}
6906
6907
6908/**
6909 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6910 * FOP, and then pops the stack.
6911 *
6912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6913 * @param pResult The result to store.
6914 * @param iStReg Which FPU register to store it in.
6915 */
6916IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6917{
6918 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6919 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6920 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6921 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6922 iemFpuMaybePopOne(pFpuCtx);
6923}
6924
6925
6926/**
6927 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6928 * FPUDP, and FPUDS.
6929 *
6930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6931 * @param pResult The result to store.
6932 * @param iStReg Which FPU register to store it in.
6933 * @param iEffSeg The effective memory operand selector register.
6934 * @param GCPtrEff The effective memory operand offset.
6935 */
6936IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6937 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6938{
6939 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6940 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6941 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6942 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6943 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6944}
6945
6946
6947/**
6948 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6949 * FPUDP, and FPUDS, and then pops the stack.
6950 *
6951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6952 * @param pResult The result to store.
6953 * @param iStReg Which FPU register to store it in.
6954 * @param iEffSeg The effective memory operand selector register.
6955 * @param GCPtrEff The effective memory operand offset.
6956 */
6957IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6958 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6959{
6960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6961 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6962 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6963 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6964 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6965 iemFpuMaybePopOne(pFpuCtx);
6966}
6967
6968
6969/**
6970 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6971 *
6972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6973 */
6974IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6975{
6976 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6977 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6978 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6979}
6980
6981
6982/**
6983 * Marks the specified stack register as free (for FFREE).
6984 *
6985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6986 * @param iStReg The register to free.
6987 */
6988IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6989{
6990 Assert(iStReg < 8);
6991 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6992 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6993 pFpuCtx->FTW &= ~RT_BIT(iReg);
6994}
6995
6996
6997/**
6998 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6999 *
7000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7001 */
7002IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7003{
7004 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7005 uint16_t uFsw = pFpuCtx->FSW;
7006 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7007 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7008 uFsw &= ~X86_FSW_TOP_MASK;
7009 uFsw |= uTop;
7010 pFpuCtx->FSW = uFsw;
7011}
7012
7013
7014/**
7015 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7016 *
7017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7018 */
7019IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7020{
7021 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7022 uint16_t uFsw = pFpuCtx->FSW;
7023 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7024 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7025 uFsw &= ~X86_FSW_TOP_MASK;
7026 uFsw |= uTop;
7027 pFpuCtx->FSW = uFsw;
7028}
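
/*
 * Illustrative note: as in the pop/push helpers above, "+7" in the TOP field
 * is -1 modulo 8, so this is TOP-- (the push direction) without touching FTW
 * or the register contents.
 */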
7029
7030
7031/**
7032 * Updates the FSW, FOP, FPUIP, and FPUCS.
7033 *
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 * @param u16FSW The FSW from the current instruction.
7036 */
7037IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7038{
7039 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7040 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7041 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7042 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7043}
7044
7045
7046/**
7047 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7048 *
7049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7050 * @param u16FSW The FSW from the current instruction.
7051 */
7052IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7053{
7054 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7055 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7056 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7057 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7058 iemFpuMaybePopOne(pFpuCtx);
7059}
7060
7061
7062/**
7063 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7064 *
7065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7066 * @param u16FSW The FSW from the current instruction.
7067 * @param iEffSeg The effective memory operand selector register.
7068 * @param GCPtrEff The effective memory operand offset.
7069 */
7070IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7071{
7072 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7073 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7074 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7075 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7076 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7077}
7078
7079
7080/**
7081 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7082 *
7083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7084 * @param u16FSW The FSW from the current instruction.
7085 */
7086IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7087{
7088 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7089 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7090 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7091 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7092 iemFpuMaybePopOne(pFpuCtx);
7093 iemFpuMaybePopOne(pFpuCtx);
7094}
7095
7096
7097/**
7098 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7099 *
7100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7101 * @param u16FSW The FSW from the current instruction.
7102 * @param iEffSeg The effective memory operand selector register.
7103 * @param GCPtrEff The effective memory operand offset.
7104 */
7105IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7106{
7107 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7108 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7109 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7110 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7111 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7112 iemFpuMaybePopOne(pFpuCtx);
7113}
7114
7115
7116/**
7117 * Worker routine for raising an FPU stack underflow exception.
7118 *
7119 * @param pFpuCtx The FPU context.
7120 * @param iStReg The stack register being accessed.
7121 */
7122IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7123{
7124 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7125 if (pFpuCtx->FCW & X86_FCW_IM)
7126 {
7127 /* Masked underflow. */
7128 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7129 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7130 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7131 if (iStReg != UINT8_MAX)
7132 {
7133 pFpuCtx->FTW |= RT_BIT(iReg);
7134 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7135 }
7136 }
7137 else
7138 {
7139 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7140 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7141 }
7142}
7143
7144
7145/**
7146 * Raises a FPU stack underflow exception.
7147 *
7148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7149 * @param iStReg The destination register that should be loaded
7150 * with QNaN if \#IS is not masked. Specify
7151 * UINT8_MAX if none (like for fcom).
7152 */
7153DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7154{
7155 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7156 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7157 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7158 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7159}
7160
7161
7162DECL_NO_INLINE(IEM_STATIC, void)
7163iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7164{
7165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7166 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7167 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7168 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7169 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7170}
7171
7172
7173DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7174{
7175 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7176 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7177 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7178 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7179 iemFpuMaybePopOne(pFpuCtx);
7180}
7181
7182
7183DECL_NO_INLINE(IEM_STATIC, void)
7184iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7185{
7186 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7187 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7188 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7189 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7190 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7191 iemFpuMaybePopOne(pFpuCtx);
7192}
7193
7194
7195DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7196{
7197 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7198 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7199 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7200 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7201 iemFpuMaybePopOne(pFpuCtx);
7202 iemFpuMaybePopOne(pFpuCtx);
7203}
7204
7205
7206DECL_NO_INLINE(IEM_STATIC, void)
7207iemFpuStackPushUnderflow(PVMCPU pVCpu)
7208{
7209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7210 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7211 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7212
7213 if (pFpuCtx->FCW & X86_FCW_IM)
7214 {
7215 /* Masked underflow - Push QNaN. */
7216 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7217 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7218 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7219 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7220 pFpuCtx->FTW |= RT_BIT(iNewTop);
7221 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7222 iemFpuRotateStackPush(pFpuCtx);
7223 }
7224 else
7225 {
7226 /* Exception pending - don't change TOP or the register stack. */
7227 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7228 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7229 }
7230}
7231
7232
7233DECL_NO_INLINE(IEM_STATIC, void)
7234iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7235{
7236 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7237 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7238 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7239
7240 if (pFpuCtx->FCW & X86_FCW_IM)
7241 {
7242 /* Masked underflow - Push QNaN. */
7243 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7244 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7245 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7246 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7247 pFpuCtx->FTW |= RT_BIT(iNewTop);
7248 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7249 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7250 iemFpuRotateStackPush(pFpuCtx);
7251 }
7252 else
7253 {
7254 /* Exception pending - don't change TOP or the register stack. */
7255 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7256 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7257 }
7258}
7259
7260
7261/**
7262 * Worker routine for raising an FPU stack overflow exception on a push.
7263 *
7264 * @param pFpuCtx The FPU context.
7265 */
7266IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7267{
7268 if (pFpuCtx->FCW & X86_FCW_IM)
7269 {
7270 /* Masked overflow. */
7271 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7272 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7273 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7274 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7275 pFpuCtx->FTW |= RT_BIT(iNewTop);
7276 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7277 iemFpuRotateStackPush(pFpuCtx);
7278 }
7279 else
7280 {
7281 /* Exception pending - don't change TOP or the register stack. */
7282 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7283 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7284 }
7285}
7286
7287
7288/**
7289 * Raises a FPU stack overflow exception on a push.
7290 *
7291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7292 */
7293DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7294{
7295 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7296 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7297 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7298 iemFpuStackPushOverflowOnly(pFpuCtx);
7299}
7300
7301
7302/**
7303 * Raises a FPU stack overflow exception on a push with a memory operand.
7304 *
7305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7306 * @param iEffSeg The effective memory operand selector register.
7307 * @param GCPtrEff The effective memory operand offset.
7308 */
7309DECL_NO_INLINE(IEM_STATIC, void)
7310iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7311{
7312 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7313 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7314 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7315 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7316 iemFpuStackPushOverflowOnly(pFpuCtx);
7317}
7318
7319
7320IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7321{
7322 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7323 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7324 if (pFpuCtx->FTW & RT_BIT(iReg))
7325 return VINF_SUCCESS;
7326 return VERR_NOT_FOUND;
7327}
7328
7329
7330IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7331{
7332 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7333 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7334 if (pFpuCtx->FTW & RT_BIT(iReg))
7335 {
7336 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7337 return VINF_SUCCESS;
7338 }
7339 return VERR_NOT_FOUND;
7340}
7341
7342
7343IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7344 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7345{
7346 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7347 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7348 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7349 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7350 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7351 {
7352 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7353 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7354 return VINF_SUCCESS;
7355 }
7356 return VERR_NOT_FOUND;
7357}
7358
7359
7360IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7361{
7362 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7363 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7364 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7365 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7366 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7367 {
7368 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7369 return VINF_SUCCESS;
7370 }
7371 return VERR_NOT_FOUND;
7372}
7373
7374
7375/**
7376 * Updates the FPU exception status after FCW is changed.
7377 *
7378 * @param pFpuCtx The FPU context.
7379 */
7380IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7381{
7382 uint16_t u16Fsw = pFpuCtx->FSW;
7383 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7384 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7385 else
7386 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7387 pFpuCtx->FSW = u16Fsw;
7388}
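
/*
 * Illustrative example: if FSW.PE is already pending and a new FCW unmasks
 * the precision exception (FCW.PM=0), the code above sets ES and B; masking
 * it again clears both bits.
 */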
7389
7390
7391/**
7392 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7393 *
7394 * @returns The full FTW.
7395 * @param pFpuCtx The FPU context.
7396 */
7397IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7398{
7399 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7400 uint16_t u16Ftw = 0;
7401 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7402 for (unsigned iSt = 0; iSt < 8; iSt++)
7403 {
7404 unsigned const iReg = (iSt + iTop) & 7;
7405 if (!(u8Ftw & RT_BIT(iReg)))
7406 u16Ftw |= 3 << (iReg * 2); /* empty */
7407 else
7408 {
7409 uint16_t uTag;
7410 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7411 if (pr80Reg->s.uExponent == 0x7fff)
7412 uTag = 2; /* Exponent is all 1's => Special. */
7413 else if (pr80Reg->s.uExponent == 0x0000)
7414 {
7415 if (pr80Reg->s.u64Mantissa == 0x0000)
7416 uTag = 1; /* All bits are zero => Zero. */
7417 else
7418 uTag = 2; /* Must be special. */
7419 }
7420 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7421 uTag = 0; /* Valid. */
7422 else
7423 uTag = 2; /* Must be special. */
7424
7425 u16Ftw |= uTag << (iReg * 2);
7426 }
7427 }
7428
7429 return u16Ftw;
7430}
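
/*
 * Illustrative example: the two-bit tags produced above follow the
 * fnstenv/fnsave convention - 00=valid, 01=zero, 10=special (NaN, infinity,
 * denormal), 11=empty.  A stack with only 1.0 loaded in ST(0) and TOP=7
 * therefore yields a full FTW of 0x3fff.
 */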
7431
7432
7433/**
7434 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7435 *
7436 * @returns The compressed FTW.
7437 * @param u16FullFtw The full FTW to convert.
7438 */
7439IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7440{
7441 uint8_t u8Ftw = 0;
7442 for (unsigned i = 0; i < 8; i++)
7443 {
7444 if ((u16FullFtw & 3) != 3 /*empty*/)
7445 u8Ftw |= RT_BIT(i);
7446 u16FullFtw >>= 2;
7447 }
7448
7449 return u8Ftw;
7450}
7451
7452/** @} */
7453
7454
7455/** @name Memory access.
7456 *
7457 * @{
7458 */
7459
7460
7461/**
7462 * Updates the IEMCPU::cbWritten counter if applicable.
7463 *
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 * @param fAccess The access being accounted for.
7466 * @param cbMem The access size.
7467 */
7468DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7469{
7470 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7471 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7472 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7473}
7474
7475
7476/**
7477 * Checks if the given segment can be written to, raising the appropriate
7478 * exception if not.
7479 *
7480 * @returns VBox strict status code.
7481 *
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 * @param pHid Pointer to the hidden register.
7484 * @param iSegReg The register number.
7485 * @param pu64BaseAddr Where to return the base address to use for the
7486 * segment. (In 64-bit code it may differ from the
7487 * base in the hidden segment.)
7488 */
7489IEM_STATIC VBOXSTRICTRC
7490iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7491{
7492 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7493 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7494 else
7495 {
7496 if (!pHid->Attr.n.u1Present)
7497 {
7498 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7499 AssertRelease(uSel == 0);
7500 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7501 return iemRaiseGeneralProtectionFault0(pVCpu);
7502 }
7503
7504 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7505 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7506 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7507 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7508 *pu64BaseAddr = pHid->u64Base;
7509 }
7510 return VINF_SUCCESS;
7511}
7512
7513
7514/**
7515 * Checks if the given segment can be read from, raising the appropriate
7516 * exception if not.
7517 *
7518 * @returns VBox strict status code.
7519 *
7520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7521 * @param pHid Pointer to the hidden register.
7522 * @param iSegReg The register number.
7523 * @param pu64BaseAddr Where to return the base address to use for the
7524 * segment. (In 64-bit code it may differ from the
7525 * base in the hidden segment.)
7526 */
7527IEM_STATIC VBOXSTRICTRC
7528iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7529{
7530 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7531 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7532 else
7533 {
7534 if (!pHid->Attr.n.u1Present)
7535 {
7536 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7537 AssertRelease(uSel == 0);
7538 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7539 return iemRaiseGeneralProtectionFault0(pVCpu);
7540 }
7541
7542 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7543 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7544 *pu64BaseAddr = pHid->u64Base;
7545 }
7546 return VINF_SUCCESS;
7547}
7548
7549
7550/**
7551 * Applies the segment limit, base and attributes.
7552 *
7553 * This may raise a \#GP or \#SS.
7554 *
7555 * @returns VBox strict status code.
7556 *
7557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7558 * @param fAccess The kind of access which is being performed.
7559 * @param iSegReg The index of the segment register to apply.
7560 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7561 * TSS, ++).
7562 * @param cbMem The access size.
7563 * @param pGCPtrMem Pointer to the guest memory address to apply
7564 * segmentation to. Input and output parameter.
7565 */
7566IEM_STATIC VBOXSTRICTRC
7567iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7568{
7569 if (iSegReg == UINT8_MAX)
7570 return VINF_SUCCESS;
7571
7572 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7573 switch (pVCpu->iem.s.enmCpuMode)
7574 {
7575 case IEMMODE_16BIT:
7576 case IEMMODE_32BIT:
7577 {
7578 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7579 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7580
7581 if ( pSel->Attr.n.u1Present
7582 && !pSel->Attr.n.u1Unusable)
7583 {
7584 Assert(pSel->Attr.n.u1DescType);
7585 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7586 {
7587 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7588 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7589 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7590
7591 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7592 {
7593 /** @todo CPL check. */
7594 }
7595
7596 /*
7597 * There are two kinds of data selectors, normal and expand down.
7598 */
7599 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7600 {
7601 if ( GCPtrFirst32 > pSel->u32Limit
7602 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7603 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7604 }
7605 else
7606 {
7607 /*
7608 * The upper boundary is defined by the B bit, not the G bit!
7609 */
7610 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7611 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7612 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7613 }
7614 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7615 }
7616 else
7617 {
7618
7619 /*
7620 * Code selectors can usually be used to read through; writing is
7621 * only permitted in real and V8086 mode.
7622 */
7623 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7624 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7625 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7626 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7627 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7628
7629 if ( GCPtrFirst32 > pSel->u32Limit
7630 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7631 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7632
7633 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7634 {
7635 /** @todo CPL check. */
7636 }
7637
7638 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7639 }
7640 }
7641 else
7642 return iemRaiseGeneralProtectionFault0(pVCpu);
7643 return VINF_SUCCESS;
7644 }
7645
7646 case IEMMODE_64BIT:
7647 {
7648 RTGCPTR GCPtrMem = *pGCPtrMem;
7649 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7650 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7651
7652 Assert(cbMem >= 1);
7653 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7654 return VINF_SUCCESS;
7655 return iemRaiseGeneralProtectionFault0(pVCpu);
7656 }
7657
7658 default:
7659 AssertFailedReturn(VERR_IEM_IPE_7);
7660 }
7661}
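
/*
 * Worked example (illustrative): for an expand-down data segment with
 * limit=0x0fff and the B bit set, a 4 byte access at 0x00001000 passes the
 * checks above (first byte above the limit, last byte <= 0xffffffff), while
 * the same access at 0x00000ffc fails and goes to iemRaiseSelectorBounds.
 */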
7662
7663
7664/**
7665 * Translates a virtual address to a physical address and checks if we
7666 * can access the page as specified.
7667 *
7668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7669 * @param GCPtrMem The virtual address.
7670 * @param fAccess The intended access.
7671 * @param pGCPhysMem Where to return the physical address.
7672 */
7673IEM_STATIC VBOXSTRICTRC
7674iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7675{
7676 /** @todo Need a different PGM interface here. We're currently using
7677 * generic / REM interfaces. this won't cut it for R0 & RC. */
7678 RTGCPHYS GCPhys;
7679 uint64_t fFlags;
7680 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7681 if (RT_FAILURE(rc))
7682 {
7683 /** @todo Check unassigned memory in unpaged mode. */
7684 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7685 *pGCPhysMem = NIL_RTGCPHYS;
7686 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7687 }
7688
7689 /* If the page is writable and does not have the no-exec bit set, all
7690 access is allowed. Otherwise we'll have to check more carefully... */
7691 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7692 {
7693 /* Write to read only memory? */
7694 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7695 && !(fFlags & X86_PTE_RW)
7696 && ( (pVCpu->iem.s.uCpl == 3
7697 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7698 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7699 {
7700 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7701 *pGCPhysMem = NIL_RTGCPHYS;
7702 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7703 }
7704
7705 /* Kernel memory accessed by userland? */
7706 if ( !(fFlags & X86_PTE_US)
7707 && pVCpu->iem.s.uCpl == 3
7708 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7709 {
7710 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7711 *pGCPhysMem = NIL_RTGCPHYS;
7712 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7713 }
7714
7715 /* Executing non-executable memory? */
7716 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7717 && (fFlags & X86_PTE_PAE_NX)
7718 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7719 {
7720 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7721 *pGCPhysMem = NIL_RTGCPHYS;
7722 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7723 VERR_ACCESS_DENIED);
7724 }
7725 }
7726
7727 /*
7728 * Set the dirty / access flags.
7729 * ASSUMES this is set when the address is translated rather than on commit...
7730 */
7731 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7732 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7733 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7734 {
7735 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7736 AssertRC(rc2);
7737 }
7738
7739 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7740 *pGCPhysMem = GCPhys;
7741 return VINF_SUCCESS;
7742}
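
/*
 * Illustrative note: the fast path above treats a PTE with RW=1, US=1 and
 * NX=0 as allowing everything, so such accesses go straight to the A/D bit
 * update; a ring-3 write to a read-only page, by contrast, ends up in
 * iemRaisePageFault with VERR_ACCESS_DENIED.
 */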
7743
7744
7745
7746/**
7747 * Maps a physical page.
7748 *
7749 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7751 * @param GCPhysMem The physical address.
7752 * @param fAccess The intended access.
7753 * @param ppvMem Where to return the mapping address.
7754 * @param pLock The PGM lock.
7755 */
7756IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7757{
7758#ifdef IEM_VERIFICATION_MODE_FULL
7759 /* Force the alternative path so we can ignore writes. */
7760 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7761 {
7762 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7763 {
7764 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7765 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7766 if (RT_FAILURE(rc2))
7767 pVCpu->iem.s.fProblematicMemory = true;
7768 }
7769 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7770 }
7771#endif
7772#ifdef IEM_LOG_MEMORY_WRITES
7773 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7774 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7775#endif
7776#ifdef IEM_VERIFICATION_MODE_MINIMAL
7777 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7778#endif
7779
7780 /** @todo This API may require some improvement later. A private deal with PGM
7781 * regarding locking and unlocking needs to be struck. A couple of TLBs
7782 * living in PGM, but with publicly accessible inlined access methods
7783 * could perhaps be an even better solution. */
7784 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7785 GCPhysMem,
7786 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7787 pVCpu->iem.s.fBypassHandlers,
7788 ppvMem,
7789 pLock);
7790 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7791 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7792
7793#ifdef IEM_VERIFICATION_MODE_FULL
7794 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7795 pVCpu->iem.s.fProblematicMemory = true;
7796#endif
7797 return rc;
7798}
7799
7800
7801/**
7802 * Unmaps a page previously mapped by iemMemPageMap.
7803 *
7804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7805 * @param GCPhysMem The physical address.
7806 * @param fAccess The intended access.
7807 * @param pvMem What iemMemPageMap returned.
7808 * @param pLock The PGM lock.
7809 */
7810DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7811{
7812 NOREF(pVCpu);
7813 NOREF(GCPhysMem);
7814 NOREF(fAccess);
7815 NOREF(pvMem);
7816 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7817}
7818
7819
7820/**
7821 * Looks up a memory mapping entry.
7822 *
7823 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7825 * @param pvMem The memory address.
7826 * @param fAccess The access to look up.
7827 */
7828DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7829{
7830 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7831 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7832 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7833 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7834 return 0;
7835 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7836 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7837 return 1;
7838 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7839 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7840 return 2;
7841 return VERR_NOT_FOUND;
7842}
7843
7844
7845/**
7846 * Finds a free memmap entry when using iNextMapping doesn't work.
7847 *
7848 * @returns Memory mapping index, 1024 on failure.
7849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7850 */
7851IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7852{
7853 /*
7854 * The easy case.
7855 */
7856 if (pVCpu->iem.s.cActiveMappings == 0)
7857 {
7858 pVCpu->iem.s.iNextMapping = 1;
7859 return 0;
7860 }
7861
7862 /* There should be enough mappings for all instructions. */
7863 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7864
7865 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7866 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7867 return i;
7868
7869 AssertFailedReturn(1024);
7870}
7871
7872
7873/**
7874 * Commits a bounce buffer that needs writing back and unmaps it.
7875 *
7876 * @returns Strict VBox status code.
7877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7878 * @param iMemMap The index of the buffer to commit.
7879 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7880 * Always false in ring-3, obviously.
7881 */
7882IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7883{
7884 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7885 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7886#ifdef IN_RING3
7887 Assert(!fPostponeFail);
7888 RT_NOREF_PV(fPostponeFail);
7889#endif
7890
7891 /*
7892 * Do the writing.
7893 */
7894#ifndef IEM_VERIFICATION_MODE_MINIMAL
7895 PVM pVM = pVCpu->CTX_SUFF(pVM);
7896 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7897 && !IEM_VERIFICATION_ENABLED(pVCpu))
7898 {
7899 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7900 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7901 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7902 if (!pVCpu->iem.s.fBypassHandlers)
7903 {
7904 /*
7905 * Carefully and efficiently dealing with access handler return
7906 * codes makes this a little bloated.
7907 */
7908 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7910 pbBuf,
7911 cbFirst,
7912 PGMACCESSORIGIN_IEM);
7913 if (rcStrict == VINF_SUCCESS)
7914 {
7915 if (cbSecond)
7916 {
7917 rcStrict = PGMPhysWrite(pVM,
7918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7919 pbBuf + cbFirst,
7920 cbSecond,
7921 PGMACCESSORIGIN_IEM);
7922 if (rcStrict == VINF_SUCCESS)
7923 { /* nothing */ }
7924 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7925 {
7926 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7928 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7929 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7930 }
7931# ifndef IN_RING3
7932 else if (fPostponeFail)
7933 {
7934 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7935 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7936 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7937 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7938 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7939 return iemSetPassUpStatus(pVCpu, rcStrict);
7940 }
7941# endif
7942 else
7943 {
7944 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7945 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7946 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7947 return rcStrict;
7948 }
7949 }
7950 }
7951 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7952 {
7953 if (!cbSecond)
7954 {
7955 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7956 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7957 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7958 }
7959 else
7960 {
7961 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7963 pbBuf + cbFirst,
7964 cbSecond,
7965 PGMACCESSORIGIN_IEM);
7966 if (rcStrict2 == VINF_SUCCESS)
7967 {
7968 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7969 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7970 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7971 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7972 }
7973 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7974 {
7975 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7976 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7977 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7978 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7979 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7980 }
7981# ifndef IN_RING3
7982 else if (fPostponeFail)
7983 {
7984 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7985 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7986 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7987 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7988 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7989 return iemSetPassUpStatus(pVCpu, rcStrict);
7990 }
7991# endif
7992 else
7993 {
7994 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7995 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7996 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7997 return rcStrict2;
7998 }
7999 }
8000 }
8001# ifndef IN_RING3
8002 else if (fPostponeFail)
8003 {
8004 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8005 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8006 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8007 if (!cbSecond)
8008 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8009 else
8010 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8011 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8012 return iemSetPassUpStatus(pVCpu, rcStrict);
8013 }
8014# endif
8015 else
8016 {
8017 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8018 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8019 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8020 return rcStrict;
8021 }
8022 }
8023 else
8024 {
8025 /*
8026 * No access handlers, much simpler.
8027 */
8028 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8029 if (RT_SUCCESS(rc))
8030 {
8031 if (cbSecond)
8032 {
8033 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8034 if (RT_SUCCESS(rc))
8035 { /* likely */ }
8036 else
8037 {
8038 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8039 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8040 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8041 return rc;
8042 }
8043 }
8044 }
8045 else
8046 {
8047 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8049 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8050 return rc;
8051 }
8052 }
8053 }
8054#endif
8055
8056#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8057 /*
8058 * Record the write(s).
8059 */
8060 if (!pVCpu->iem.s.fNoRem)
8061 {
8062 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8063 if (pEvtRec)
8064 {
8065 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8066 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8067 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8068 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8069 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8070 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8071 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8072 }
8073 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8074 {
8075 pEvtRec = iemVerifyAllocRecord(pVCpu);
8076 if (pEvtRec)
8077 {
8078 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8079 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8080 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8081 memcpy(pEvtRec->u.RamWrite.ab,
8082 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8083 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8084 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8085 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8086 }
8087 }
8088 }
8089#endif
8090#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8091 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8092 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8093 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8094 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8095 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8096 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8097
8098 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8099 g_cbIemWrote = cbWrote;
8100 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8101#endif
8102
8103 /*
8104 * Free the mapping entry.
8105 */
8106 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8107 Assert(pVCpu->iem.s.cActiveMappings != 0);
8108 pVCpu->iem.s.cActiveMappings--;
8109 return VINF_SUCCESS;
8110}
8111
8112
8113/**
8114 * iemMemMap worker that deals with a request crossing pages.
8115 */
8116IEM_STATIC VBOXSTRICTRC
8117iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8118{
8119 /*
8120 * Do the address translations.
8121 */
8122 RTGCPHYS GCPhysFirst;
8123 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8124 if (rcStrict != VINF_SUCCESS)
8125 return rcStrict;
8126
8127 RTGCPHYS GCPhysSecond;
8128 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8129 fAccess, &GCPhysSecond);
8130 if (rcStrict != VINF_SUCCESS)
8131 return rcStrict;
8132 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8133
8134 PVM pVM = pVCpu->CTX_SUFF(pVM);
8135#ifdef IEM_VERIFICATION_MODE_FULL
8136 /*
8137 * Detect problematic memory when verifying so we can select
8138 * the right execution engine. (TLB: Redo this.)
8139 */
8140 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8141 {
8142 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8143 if (RT_SUCCESS(rc2))
8144 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8145 if (RT_FAILURE(rc2))
8146 pVCpu->iem.s.fProblematicMemory = true;
8147 }
8148#endif
8149
8150
8151 /*
8152 * Read in the current memory content if it's a read, execute or partial
8153 * write access.
8154 */
8155 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8156 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8157 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8158
8159 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8160 {
8161 if (!pVCpu->iem.s.fBypassHandlers)
8162 {
8163 /*
8164 * Must carefully deal with access handler status codes here,
8165 * which makes the code a bit bloated.
8166 */
8167 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8168 if (rcStrict == VINF_SUCCESS)
8169 {
8170 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8171 if (rcStrict == VINF_SUCCESS)
8172 { /*likely */ }
8173 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8174 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8175 else
8176 {
8177 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8178 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8179 return rcStrict;
8180 }
8181 }
8182 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8183 {
8184 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8185 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8186 {
8187 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8188 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8189 }
8190 else
8191 {
8192 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8193 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8194 return rcStrict2;
8195 }
8196 }
8197 else
8198 {
8199 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8200 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8201 return rcStrict;
8202 }
8203 }
8204 else
8205 {
8206 /*
8207 * No informational status codes here, much more straightforward.
8208 */
8209 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8210 if (RT_SUCCESS(rc))
8211 {
8212 Assert(rc == VINF_SUCCESS);
8213 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8214 if (RT_SUCCESS(rc))
8215 Assert(rc == VINF_SUCCESS);
8216 else
8217 {
8218 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8219 return rc;
8220 }
8221 }
8222 else
8223 {
8224 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8225 return rc;
8226 }
8227 }
8228
8229#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8230 if ( !pVCpu->iem.s.fNoRem
8231 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8232 {
8233 /*
8234 * Record the reads.
8235 */
8236 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8237 if (pEvtRec)
8238 {
8239 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8240 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8241 pEvtRec->u.RamRead.cb = cbFirstPage;
8242 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8243 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8244 }
8245 pEvtRec = iemVerifyAllocRecord(pVCpu);
8246 if (pEvtRec)
8247 {
8248 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8249 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8250 pEvtRec->u.RamRead.cb = cbSecondPage;
8251 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8252 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8253 }
8254 }
8255#endif
8256 }
8257#ifdef VBOX_STRICT
8258 else
8259 memset(pbBuf, 0xcc, cbMem);
8260 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8261 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8262#endif
8263
8264 /*
8265 * Commit the bounce buffer entry.
8266 */
8267 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8268 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8269 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8270 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8271 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8272 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8273 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8274 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8275 pVCpu->iem.s.cActiveMappings++;
8276
8277 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8278 *ppvMem = pbBuf;
8279 return VINF_SUCCESS;
8280}
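/*
 * Illustrative note (comment only, not part of the build): for a 4 byte access
 * at a linear address whose page offset is 0xffe, the function above ends up
 * with cbFirstPage = PAGE_SIZE - 0xffe = 2 and cbSecondPage = 2, gathering both
 * halves into aBounceBuffers[iMemMap] so the caller still sees one contiguous
 * mapping.
 */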
8281
8282
8283/**
8284 * iemMemMap worker that deals with iemMemPageMap failures.
8285 */
8286IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8287 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8288{
8289 /*
8290 * Filter out conditions we can handle and the ones which shouldn't happen.
8291 */
8292 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8293 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8294 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8295 {
8296 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8297 return rcMap;
8298 }
8299 pVCpu->iem.s.cPotentialExits++;
8300
8301 /*
8302 * Read in the current memory content if it's a read, execute or partial
8303 * write access.
8304 */
8305 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8306 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8307 {
8308 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8309 memset(pbBuf, 0xff, cbMem);
8310 else
8311 {
8312 int rc;
8313 if (!pVCpu->iem.s.fBypassHandlers)
8314 {
8315 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8316 if (rcStrict == VINF_SUCCESS)
8317 { /* nothing */ }
8318 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8319 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8320 else
8321 {
8322 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8323 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8324 return rcStrict;
8325 }
8326 }
8327 else
8328 {
8329 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8330 if (RT_SUCCESS(rc))
8331 { /* likely */ }
8332 else
8333 {
8334 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8335 GCPhysFirst, rc));
8336 return rc;
8337 }
8338 }
8339 }
8340
8341#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8342 if ( !pVCpu->iem.s.fNoRem
8343 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8344 {
8345 /*
8346 * Record the read.
8347 */
8348 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8349 if (pEvtRec)
8350 {
8351 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8352 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8353 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8354 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8355 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8356 }
8357 }
8358#endif
8359 }
8360#ifdef VBOX_STRICT
8361 else
8362 memset(pbBuf, 0xcc, cbMem);
8363 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8364 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8365#endif
8368
8369 /*
8370 * Commit the bounce buffer entry.
8371 */
8372 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8374 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8377 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8378 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8379 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8380 pVCpu->iem.s.cActiveMappings++;
8381
8382 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8383 *ppvMem = pbBuf;
8384 return VINF_SUCCESS;
8385}
8386
8387
8388
8389/**
8390 * Maps the specified guest memory for the given kind of access.
8391 *
8392 * This may be using bounce buffering of the memory if it's crossing a page
8393 * boundary or if there is an access handler installed for any of it. Because
8394 * of lock prefix guarantees, we're in for some extra clutter when this
8395 * happens.
8396 *
8397 * This may raise a \#GP, \#SS, \#PF or \#AC.
8398 *
8399 * @returns VBox strict status code.
8400 *
8401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8402 * @param ppvMem Where to return the pointer to the mapped
8403 * memory.
8404 * @param cbMem The number of bytes to map. This is usually 1,
8405 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8406 * string operations it can be up to a page.
8407 * @param iSegReg The index of the segment register to use for
8408 * this access. The base and limits are checked.
8409 * Use UINT8_MAX to indicate that no segmentation
8410 * is required (for IDT, GDT and LDT accesses).
8411 * @param GCPtrMem The address of the guest memory.
8412 * @param fAccess How the memory is being accessed. The
8413 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8414 * how to map the memory, while the
8415 * IEM_ACCESS_WHAT_XXX bit is used when raising
8416 * exceptions.
8417 */
8418IEM_STATIC VBOXSTRICTRC
8419iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8420{
8421 /*
8422 * Check the input and figure out which mapping entry to use.
8423 */
8424 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8425 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8426 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8427
8428 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8429 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8430 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8431 {
8432 iMemMap = iemMemMapFindFree(pVCpu);
8433 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8434 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8435 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8436 pVCpu->iem.s.aMemMappings[2].fAccess),
8437 VERR_IEM_IPE_9);
8438 }
8439
8440 /*
8441 * Map the memory, checking that we can actually access it. If something
8442 * slightly complicated happens, fall back on bounce buffering.
8443 */
8444 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8445 if (rcStrict != VINF_SUCCESS)
8446 return rcStrict;
8447
8448 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8449 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8450
8451 RTGCPHYS GCPhysFirst;
8452 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8453 if (rcStrict != VINF_SUCCESS)
8454 return rcStrict;
8455
8456 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8457 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8458 if (fAccess & IEM_ACCESS_TYPE_READ)
8459 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8460
8461 void *pvMem;
8462 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8463 if (rcStrict != VINF_SUCCESS)
8464 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8465
8466 /*
8467 * Fill in the mapping table entry.
8468 */
8469 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8470 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8471 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8472 pVCpu->iem.s.cActiveMappings++;
8473
8474 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8475 *ppvMem = pvMem;
8476 return VINF_SUCCESS;
8477}
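/*
 * Illustrative usage sketch (comment only, not part of the build); the data
 * fetch and store helpers further down all follow this map / access / commit
 * pattern, where u16Value is assumed to be a caller provided local:
 *
 *      uint16_t *pu16Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu16Dst = u16Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      }
 */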
8478
8479
8480/**
8481 * Commits the guest memory if bounce buffered and unmaps it.
8482 *
8483 * @returns Strict VBox status code.
8484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8485 * @param pvMem The mapping.
8486 * @param fAccess The kind of access.
8487 */
8488IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8489{
8490 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8491 AssertReturn(iMemMap >= 0, iMemMap);
8492
8493 /* If it's bounce buffered, we may need to write back the buffer. */
8494 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8495 {
8496 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8497 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8498 }
8499 /* Otherwise unlock it. */
8500 else
8501 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8502
8503 /* Free the entry. */
8504 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8505 Assert(pVCpu->iem.s.cActiveMappings != 0);
8506 pVCpu->iem.s.cActiveMappings--;
8507 return VINF_SUCCESS;
8508}
8509
8510#ifdef IEM_WITH_SETJMP
8511
8512/**
8513 * Maps the specified guest memory for the given kind of access, longjmp on
8514 * error.
8515 *
8516 * This may be using bounce buffering of the memory if it's crossing a page
8517 * boundary or if there is an access handler installed for any of it. Because
8518 * of lock prefix guarantees, we're in for some extra clutter when this
8519 * happens.
8520 *
8521 * This may raise a \#GP, \#SS, \#PF or \#AC.
8522 *
8523 * @returns Pointer to the mapped memory.
8524 *
8525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8526 * @param cbMem The number of bytes to map. This is usually 1,
8527 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8528 * string operations it can be up to a page.
8529 * @param iSegReg The index of the segment register to use for
8530 * this access. The base and limits are checked.
8531 * Use UINT8_MAX to indicate that no segmentation
8532 * is required (for IDT, GDT and LDT accesses).
8533 * @param GCPtrMem The address of the guest memory.
8534 * @param fAccess How the memory is being accessed. The
8535 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8536 * how to map the memory, while the
8537 * IEM_ACCESS_WHAT_XXX bit is used when raising
8538 * exceptions.
8539 */
8540IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8541{
8542 /*
8543 * Check the input and figure out which mapping entry to use.
8544 */
8545 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8546 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8547 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8548
8549 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8550 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8551 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8552 {
8553 iMemMap = iemMemMapFindFree(pVCpu);
8554 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8555 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8556 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8557 pVCpu->iem.s.aMemMappings[2].fAccess),
8558 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8559 }
8560
8561 /*
8562 * Map the memory, checking that we can actually access it. If something
8563 * slightly complicated happens, fall back on bounce buffering.
8564 */
8565 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8566 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8567 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8568
8569 /* Crossing a page boundary? */
8570 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8571 { /* No (likely). */ }
8572 else
8573 {
8574 void *pvMem;
8575 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8576 if (rcStrict == VINF_SUCCESS)
8577 return pvMem;
8578 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8579 }
8580
8581 RTGCPHYS GCPhysFirst;
8582 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8583 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8584 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8585
8586 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8587 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8588 if (fAccess & IEM_ACCESS_TYPE_READ)
8589 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8590
8591 void *pvMem;
8592 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8593 if (rcStrict == VINF_SUCCESS)
8594 { /* likely */ }
8595 else
8596 {
8597 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8598 if (rcStrict == VINF_SUCCESS)
8599 return pvMem;
8600 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8601 }
8602
8603 /*
8604 * Fill in the mapping table entry.
8605 */
8606 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8607 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8608 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8609 pVCpu->iem.s.cActiveMappings++;
8610
8611 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8612 return pvMem;
8613}
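/*
 * Illustrative usage sketch (comment only, not part of the build); the *Jmp
 * fetch and store helpers below rely on longjmp for error reporting, so the
 * call site needs no status code checking:
 *
 *      uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src),
 *                                                               X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R);
 *      uint16_t const u16Value = *pu16Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 */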
8614
8615
8616/**
8617 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8618 *
8619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8620 * @param pvMem The mapping.
8621 * @param fAccess The kind of access.
8622 */
8623IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8624{
8625 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8626 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8627
8628 /* If it's bounce buffered, we may need to write back the buffer. */
8629 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8630 {
8631 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8632 {
8633 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8634 if (rcStrict == VINF_SUCCESS)
8635 return;
8636 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8637 }
8638 }
8639 /* Otherwise unlock it. */
8640 else
8641 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8642
8643 /* Free the entry. */
8644 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8645 Assert(pVCpu->iem.s.cActiveMappings != 0);
8646 pVCpu->iem.s.cActiveMappings--;
8647}
8648
8649#endif
8650
8651#ifndef IN_RING3
8652/**
8653 * Commits the guest memory if bounce buffered and unmaps it; if any bounce buffer
8654 * part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and the pending write flags).
8655 *
8656 * Allows the instruction to be completed and retired, while the IEM user will
8657 * return to ring-3 immediately afterwards and do the postponed writes there.
8658 *
8659 * @returns VBox status code (no strict statuses). Caller must check
8660 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8662 * @param pvMem The mapping.
8663 * @param fAccess The kind of access.
8664 */
8665IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8666{
8667 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8668 AssertReturn(iMemMap >= 0, iMemMap);
8669
8670 /* If it's bounce buffered, we may need to write back the buffer. */
8671 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8672 {
8673 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8674 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8675 }
8676 /* Otherwise unlock it. */
8677 else
8678 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8679
8680 /* Free the entry. */
8681 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8682 Assert(pVCpu->iem.s.cActiveMappings != 0);
8683 pVCpu->iem.s.cActiveMappings--;
8684 return VINF_SUCCESS;
8685}
8686#endif
8687
8688
8689/**
8690 * Rolls back mappings, releasing page locks and such.
8691 *
8692 * The caller shall only call this after checking cActiveMappings.
8693 *
8695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8696 */
8697IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8698{
8699 Assert(pVCpu->iem.s.cActiveMappings > 0);
8700
8701 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8702 while (iMemMap-- > 0)
8703 {
8704 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8705 if (fAccess != IEM_ACCESS_INVALID)
8706 {
8707 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8708 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8709 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8710 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8711 Assert(pVCpu->iem.s.cActiveMappings > 0);
8712 pVCpu->iem.s.cActiveMappings--;
8713 }
8714 }
8715}
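/*
 * Illustrative caller-side sketch (comment only, not part of the build): the
 * instruction execution loops are expected to clean up along these lines when
 * an instruction fails with mappings still active, as per the note above:
 *
 *      if (   rcStrict != VINF_SUCCESS
 *          && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */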
8716
8717
8718/**
8719 * Fetches a data byte.
8720 *
8721 * @returns Strict VBox status code.
8722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8723 * @param pu8Dst Where to return the byte.
8724 * @param iSegReg The index of the segment register to use for
8725 * this access. The base and limits are checked.
8726 * @param GCPtrMem The address of the guest memory.
8727 */
8728IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8729{
8730 /* The lazy approach for now... */
8731 uint8_t const *pu8Src;
8732 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8733 if (rc == VINF_SUCCESS)
8734 {
8735 *pu8Dst = *pu8Src;
8736 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8737 }
8738 return rc;
8739}
8740
8741
8742#ifdef IEM_WITH_SETJMP
8743/**
8744 * Fetches a data byte, longjmp on error.
8745 *
8746 * @returns The byte.
8747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8748 * @param iSegReg The index of the segment register to use for
8749 * this access. The base and limits are checked.
8750 * @param GCPtrMem The address of the guest memory.
8751 */
8752DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8753{
8754 /* The lazy approach for now... */
8755 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8756 uint8_t const bRet = *pu8Src;
8757 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8758 return bRet;
8759}
8760#endif /* IEM_WITH_SETJMP */
8761
8762
8763/**
8764 * Fetches a data word.
8765 *
8766 * @returns Strict VBox status code.
8767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8768 * @param pu16Dst Where to return the word.
8769 * @param iSegReg The index of the segment register to use for
8770 * this access. The base and limits are checked.
8771 * @param GCPtrMem The address of the guest memory.
8772 */
8773IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8774{
8775 /* The lazy approach for now... */
8776 uint16_t const *pu16Src;
8777 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8778 if (rc == VINF_SUCCESS)
8779 {
8780 *pu16Dst = *pu16Src;
8781 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8782 }
8783 return rc;
8784}
8785
8786
8787#ifdef IEM_WITH_SETJMP
8788/**
8789 * Fetches a data word, longjmp on error.
8790 *
8791 * @returns The word
8792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8793 * @param iSegReg The index of the segment register to use for
8794 * this access. The base and limits are checked.
8795 * @param GCPtrMem The address of the guest memory.
8796 */
8797DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8798{
8799 /* The lazy approach for now... */
8800 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8801 uint16_t const u16Ret = *pu16Src;
8802 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8803 return u16Ret;
8804}
8805#endif
8806
8807
8808/**
8809 * Fetches a data dword.
8810 *
8811 * @returns Strict VBox status code.
8812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8813 * @param pu32Dst Where to return the dword.
8814 * @param iSegReg The index of the segment register to use for
8815 * this access. The base and limits are checked.
8816 * @param GCPtrMem The address of the guest memory.
8817 */
8818IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8819{
8820 /* The lazy approach for now... */
8821 uint32_t const *pu32Src;
8822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8823 if (rc == VINF_SUCCESS)
8824 {
8825 *pu32Dst = *pu32Src;
8826 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8827 }
8828 return rc;
8829}
8830
8831
8832#ifdef IEM_WITH_SETJMP
8833
8834IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8835{
8836 Assert(cbMem >= 1);
8837 Assert(iSegReg < X86_SREG_COUNT);
8838
8839 /*
8840 * 64-bit mode is simpler.
8841 */
8842 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8843 {
8844 if (iSegReg >= X86_SREG_FS)
8845 {
8846 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8847 GCPtrMem += pSel->u64Base;
8848 }
8849
8850 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8851 return GCPtrMem;
8852 }
8853 /*
8854 * 16-bit and 32-bit segmentation.
8855 */
8856 else
8857 {
8858 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8859 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8860 == X86DESCATTR_P /* data, expand up */
8861 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8862 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8863 {
8864 /* expand up */
8865 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8866 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8867 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8868 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8869 }
8870 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8871 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8872 {
8873 /* expand down */
8874 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8875 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8876 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8877 && GCPtrLast32 > (uint32_t)GCPtrMem))
8878 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8879 }
8880 else
8881 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8882 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8883 }
8884 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8885}
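/*
 * Worked example for the limit checks above (comment only, not part of the
 * build), assuming a 32-bit data segment with u64Base = 0 and u32Limit = 0xfff:
 *  - expand up:   a 4 byte read at offset 0xffc touches bytes 0xffc..0xfff and
 *                 is accepted; the same read at 0xffd would reach byte 0x1000
 *                 and ends in iemRaiseSelectorBoundsJmp.
 *  - expand down: valid offsets lie above the limit, so offset 0xffc is
 *                 rejected while offset 0x1000 is accepted, up to the 0xffff
 *                 or 0xffffffff ceiling selected by Attr.n.u1DefBig.
 */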
8886
8887
8888IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8889{
8890 Assert(cbMem >= 1);
8891 Assert(iSegReg < X86_SREG_COUNT);
8892
8893 /*
8894 * 64-bit mode is simpler.
8895 */
8896 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8897 {
8898 if (iSegReg >= X86_SREG_FS)
8899 {
8900 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8901 GCPtrMem += pSel->u64Base;
8902 }
8903
8904 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8905 return GCPtrMem;
8906 }
8907 /*
8908 * 16-bit and 32-bit segmentation.
8909 */
8910 else
8911 {
8912 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8913 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8914 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8915 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8916 {
8917 /* expand up */
8918 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8919 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8920 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8921 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8922 }
8923 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8924 {
8925 /* expand down */
8926 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8927 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8928 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8929 && GCPtrLast32 > (uint32_t)GCPtrMem))
8930 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8931 }
8932 else
8933 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8934 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8935 }
8936 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8937}
8938
8939
8940/**
8941 * Fetches a data dword, longjmp on error, fallback/safe version.
8942 *
8943 * @returns The dword
8944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8945 * @param iSegReg The index of the segment register to use for
8946 * this access. The base and limits are checked.
8947 * @param GCPtrMem The address of the guest memory.
8948 */
8949IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8950{
8951 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8952 uint32_t const u32Ret = *pu32Src;
8953 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8954 return u32Ret;
8955}
8956
8957
8958/**
8959 * Fetches a data dword, longjmp on error.
8960 *
8961 * @returns The dword
8962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8963 * @param iSegReg The index of the segment register to use for
8964 * this access. The base and limits are checked.
8965 * @param GCPtrMem The address of the guest memory.
8966 */
8967DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8968{
8969# ifdef IEM_WITH_DATA_TLB
8970 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8971 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8972 {
8973 /// @todo more later.
8974 }
8975
8976 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8977# else
8978 /* The lazy approach. */
8979 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8980 uint32_t const u32Ret = *pu32Src;
8981 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8982 return u32Ret;
8983# endif
8984}
8985#endif
8986
8987
8988#ifdef SOME_UNUSED_FUNCTION
8989/**
8990 * Fetches a data dword and sign extends it to a qword.
8991 *
8992 * @returns Strict VBox status code.
8993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8994 * @param pu64Dst Where to return the sign extended value.
8995 * @param iSegReg The index of the segment register to use for
8996 * this access. The base and limits are checked.
8997 * @param GCPtrMem The address of the guest memory.
8998 */
8999IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9000{
9001 /* The lazy approach for now... */
9002 int32_t const *pi32Src;
9003 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9004 if (rc == VINF_SUCCESS)
9005 {
9006 *pu64Dst = *pi32Src;
9007 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9008 }
9009#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9010 else
9011 *pu64Dst = 0;
9012#endif
9013 return rc;
9014}
9015#endif
9016
9017
9018/**
9019 * Fetches a data qword.
9020 *
9021 * @returns Strict VBox status code.
9022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9023 * @param pu64Dst Where to return the qword.
9024 * @param iSegReg The index of the segment register to use for
9025 * this access. The base and limits are checked.
9026 * @param GCPtrMem The address of the guest memory.
9027 */
9028IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9029{
9030 /* The lazy approach for now... */
9031 uint64_t const *pu64Src;
9032 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9033 if (rc == VINF_SUCCESS)
9034 {
9035 *pu64Dst = *pu64Src;
9036 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9037 }
9038 return rc;
9039}
9040
9041
9042#ifdef IEM_WITH_SETJMP
9043/**
9044 * Fetches a data qword, longjmp on error.
9045 *
9046 * @returns The qword.
9047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9048 * @param iSegReg The index of the segment register to use for
9049 * this access. The base and limits are checked.
9050 * @param GCPtrMem The address of the guest memory.
9051 */
9052DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9053{
9054 /* The lazy approach for now... */
9055 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9056 uint64_t const u64Ret = *pu64Src;
9057 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9058 return u64Ret;
9059}
9060#endif
9061
9062
9063/**
9064 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9065 *
9066 * @returns Strict VBox status code.
9067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9068 * @param pu64Dst Where to return the qword.
9069 * @param iSegReg The index of the segment register to use for
9070 * this access. The base and limits are checked.
9071 * @param GCPtrMem The address of the guest memory.
9072 */
9073IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9074{
9075 /* The lazy approach for now... */
9076 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9077 if (RT_UNLIKELY(GCPtrMem & 15))
9078 return iemRaiseGeneralProtectionFault0(pVCpu);
9079
9080 uint64_t const *pu64Src;
9081 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9082 if (rc == VINF_SUCCESS)
9083 {
9084 *pu64Dst = *pu64Src;
9085 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9086 }
9087 return rc;
9088}
9089
9090
9091#ifdef IEM_WITH_SETJMP
9092/**
9093 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9094 *
9095 * @returns The qword.
9096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9097 * @param iSegReg The index of the segment register to use for
9098 * this access. The base and limits are checked.
9099 * @param GCPtrMem The address of the guest memory.
9100 */
9101DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9102{
9103 /* The lazy approach for now... */
9104 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9105 if (RT_LIKELY(!(GCPtrMem & 15)))
9106 {
9107 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9108 uint64_t const u64Ret = *pu64Src;
9109 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9110 return u64Ret;
9111 }
9112
9113 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9114 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9115}
9116#endif
9117
9118
9119/**
9120 * Fetches a data tword.
9121 *
9122 * @returns Strict VBox status code.
9123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9124 * @param pr80Dst Where to return the tword.
9125 * @param iSegReg The index of the segment register to use for
9126 * this access. The base and limits are checked.
9127 * @param GCPtrMem The address of the guest memory.
9128 */
9129IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9130{
9131 /* The lazy approach for now... */
9132 PCRTFLOAT80U pr80Src;
9133 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9134 if (rc == VINF_SUCCESS)
9135 {
9136 *pr80Dst = *pr80Src;
9137 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9138 }
9139 return rc;
9140}
9141
9142
9143#ifdef IEM_WITH_SETJMP
9144/**
9145 * Fetches a data tword, longjmp on error.
9146 *
9147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9148 * @param pr80Dst Where to return the tword.
9149 * @param iSegReg The index of the segment register to use for
9150 * this access. The base and limits are checked.
9151 * @param GCPtrMem The address of the guest memory.
9152 */
9153DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9154{
9155 /* The lazy approach for now... */
9156 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9157 *pr80Dst = *pr80Src;
9158 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9159}
9160#endif
9161
9162
9163/**
9164 * Fetches a data dqword (double qword), generally SSE related.
9165 *
9166 * @returns Strict VBox status code.
9167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9168 * @param pu128Dst Where to return the dqword.
9169 * @param iSegReg The index of the segment register to use for
9170 * this access. The base and limits are checked.
9171 * @param GCPtrMem The address of the guest memory.
9172 */
9173IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9174{
9175 /* The lazy approach for now... */
9176 uint128_t const *pu128Src;
9177 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9178 if (rc == VINF_SUCCESS)
9179 {
9180 *pu128Dst = *pu128Src;
9181 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9182 }
9183 return rc;
9184}
9185
9186
9187#ifdef IEM_WITH_SETJMP
9188/**
9189 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9190 *
9191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9192 * @param pu128Dst Where to return the dqword.
9193 * @param iSegReg The index of the segment register to use for
9194 * this access. The base and limits are checked.
9195 * @param GCPtrMem The address of the guest memory.
9196 */
9197IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9198{
9199 /* The lazy approach for now... */
9200 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9201 *pu128Dst = *pu128Src;
9202 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9203}
9204#endif
9205
9206
9207/**
9208 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9209 * related.
9210 *
9211 * Raises \#GP(0) if not aligned.
9212 *
9213 * @returns Strict VBox status code.
9214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9215 * @param pu128Dst Where to return the dqword.
9216 * @param iSegReg The index of the segment register to use for
9217 * this access. The base and limits are checked.
9218 * @param GCPtrMem The address of the guest memory.
9219 */
9220IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9221{
9222 /* The lazy approach for now... */
9223 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9224 if ( (GCPtrMem & 15)
9225 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9226 return iemRaiseGeneralProtectionFault0(pVCpu);
9227
9228 uint128_t const *pu128Src;
9229 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9230 if (rc == VINF_SUCCESS)
9231 {
9232 *pu128Dst = *pu128Src;
9233 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9234 }
9235 return rc;
9236}
9237
9238
9239#ifdef IEM_WITH_SETJMP
9240/**
9241 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9242 * related, longjmp on error.
9243 *
9244 * Raises \#GP(0) if not aligned.
9245 *
9246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9247 * @param pu128Dst Where to return the dqword.
9248 * @param iSegReg The index of the segment register to use for
9249 * this access. The base and limits are checked.
9250 * @param GCPtrMem The address of the guest memory.
9251 */
9252DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9253{
9254 /* The lazy approach for now... */
9255 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9256 if ( (GCPtrMem & 15) == 0
9257 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9258 {
9259 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9260 IEM_ACCESS_DATA_R);
9261 *pu128Dst = *pu128Src;
9262 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9263 return;
9264 }
9265
9266 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9268}
9269#endif
9270
9271
9272
9273/**
9274 * Fetches a descriptor register (lgdt, lidt).
9275 *
9276 * @returns Strict VBox status code.
9277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9278 * @param pcbLimit Where to return the limit.
9279 * @param pGCPtrBase Where to return the base.
9280 * @param iSegReg The index of the segment register to use for
9281 * this access. The base and limits are checked.
9282 * @param GCPtrMem The address of the guest memory.
9283 * @param enmOpSize The effective operand size.
9284 */
9285IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9286 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9287{
9288 /*
9289 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9290 * little special:
9291 * - The two reads are done separately.
9292 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9293 * - We suspect the 386 to actually commit the limit before the base in
9294 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9295 * don't try to emulate this eccentric behavior, because it's not well
9296 * enough understood and rather hard to trigger.
9297 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9298 */
9299 VBOXSTRICTRC rcStrict;
9300 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9301 {
9302 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9303 if (rcStrict == VINF_SUCCESS)
9304 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9305 }
9306 else
9307 {
9308 uint32_t uTmp = 0; /* (Visual C++ may warn about it being used uninitialized) */
9309 if (enmOpSize == IEMMODE_32BIT)
9310 {
9311 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9312 {
9313 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9314 if (rcStrict == VINF_SUCCESS)
9315 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9316 }
9317 else
9318 {
9319 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9320 if (rcStrict == VINF_SUCCESS)
9321 {
9322 *pcbLimit = (uint16_t)uTmp;
9323 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9324 }
9325 }
9326 if (rcStrict == VINF_SUCCESS)
9327 *pGCPtrBase = uTmp;
9328 }
9329 else
9330 {
9331 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9332 if (rcStrict == VINF_SUCCESS)
9333 {
9334 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9335 if (rcStrict == VINF_SUCCESS)
9336 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9337 }
9338 }
9339 }
9340 return rcStrict;
9341}
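/*
 * Illustrative memory operand layout for the fetch above (comment only, not
 * part of the build): LGDT/LIDT read a 16-bit limit followed by the base:
 *
 *      GCPtrMem + 0:  uint16_t  limit
 *      GCPtrMem + 2:  base - 32 bits with 16/32-bit operand size (the top
 *                     byte being ignored for 16-bit operand size), and a full
 *                     64 bits in long mode regardless of operand size override.
 */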
9342
9343
9344
9345/**
9346 * Stores a data byte.
9347 *
9348 * @returns Strict VBox status code.
9349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9350 * @param iSegReg The index of the segment register to use for
9351 * this access. The base and limits are checked.
9352 * @param GCPtrMem The address of the guest memory.
9353 * @param u8Value The value to store.
9354 */
9355IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9356{
9357 /* The lazy approach for now... */
9358 uint8_t *pu8Dst;
9359 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9360 if (rc == VINF_SUCCESS)
9361 {
9362 *pu8Dst = u8Value;
9363 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9364 }
9365 return rc;
9366}
9367
9368
9369#ifdef IEM_WITH_SETJMP
9370/**
9371 * Stores a data byte, longjmp on error.
9372 *
9373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9374 * @param iSegReg The index of the segment register to use for
9375 * this access. The base and limits are checked.
9376 * @param GCPtrMem The address of the guest memory.
9377 * @param u8Value The value to store.
9378 */
9379IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9380{
9381 /* The lazy approach for now... */
9382 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9383 *pu8Dst = u8Value;
9384 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9385}
9386#endif
9387
9388
9389/**
9390 * Stores a data word.
9391 *
9392 * @returns Strict VBox status code.
9393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9394 * @param iSegReg The index of the segment register to use for
9395 * this access. The base and limits are checked.
9396 * @param GCPtrMem The address of the guest memory.
9397 * @param u16Value The value to store.
9398 */
9399IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9400{
9401 /* The lazy approach for now... */
9402 uint16_t *pu16Dst;
9403 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9404 if (rc == VINF_SUCCESS)
9405 {
9406 *pu16Dst = u16Value;
9407 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9408 }
9409 return rc;
9410}
9411
9412
9413#ifdef IEM_WITH_SETJMP
9414/**
9415 * Stores a data word, longjmp on error.
9416 *
9417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9418 * @param iSegReg The index of the segment register to use for
9419 * this access. The base and limits are checked.
9420 * @param GCPtrMem The address of the guest memory.
9421 * @param u16Value The value to store.
9422 */
9423IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9424{
9425 /* The lazy approach for now... */
9426 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9427 *pu16Dst = u16Value;
9428 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9429}
9430#endif
9431
9432
9433/**
9434 * Stores a data dword.
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param iSegReg The index of the segment register to use for
9439 * this access. The base and limits are checked.
9440 * @param GCPtrMem The address of the guest memory.
9441 * @param u32Value The value to store.
9442 */
9443IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9444{
9445 /* The lazy approach for now... */
9446 uint32_t *pu32Dst;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu32Dst = u32Value;
9451 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9452 }
9453 return rc;
9454}
9455
9456
9457#ifdef IEM_WITH_SETJMP
9458/**
9459 * Stores a data dword, longjmp on error.
9460 *
9462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9463 * @param iSegReg The index of the segment register to use for
9464 * this access. The base and limits are checked.
9465 * @param GCPtrMem The address of the guest memory.
9466 * @param u32Value The value to store.
9467 */
9468IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9469{
9470 /* The lazy approach for now... */
9471 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9472 *pu32Dst = u32Value;
9473 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9474}
9475#endif
9476
9477
9478/**
9479 * Stores a data qword.
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param iSegReg The index of the segment register to use for
9484 * this access. The base and limits are checked.
9485 * @param GCPtrMem The address of the guest memory.
9486 * @param u64Value The value to store.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9489{
9490 /* The lazy approach for now... */
9491 uint64_t *pu64Dst;
9492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9493 if (rc == VINF_SUCCESS)
9494 {
9495 *pu64Dst = u64Value;
9496 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9497 }
9498 return rc;
9499}
9500
9501
9502#ifdef IEM_WITH_SETJMP
9503/**
9504 * Stores a data qword, longjmp on error.
9505 *
9506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9507 * @param iSegReg The index of the segment register to use for
9508 * this access. The base and limits are checked.
9509 * @param GCPtrMem The address of the guest memory.
9510 * @param u64Value The value to store.
9511 */
9512IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9513{
9514 /* The lazy approach for now... */
9515 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9516 *pu64Dst = u64Value;
9517 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9518}
9519#endif
9520
9521
9522/**
9523 * Stores a data dqword.
9524 *
9525 * @returns Strict VBox status code.
9526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9527 * @param iSegReg The index of the segment register to use for
9528 * this access. The base and limits are checked.
9529 * @param GCPtrMem The address of the guest memory.
9530 * @param u128Value The value to store.
9531 */
9532IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9533{
9534 /* The lazy approach for now... */
9535 uint128_t *pu128Dst;
9536 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9537 if (rc == VINF_SUCCESS)
9538 {
9539 *pu128Dst = u128Value;
9540 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9541 }
9542 return rc;
9543}
9544
9545
9546#ifdef IEM_WITH_SETJMP
9547/**
9548 * Stores a data dqword, longjmp on error.
9549 *
9550 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9551 * @param iSegReg The index of the segment register to use for
9552 * this access. The base and limits are checked.
9553 * @param GCPtrMem The address of the guest memory.
9554 * @param u128Value The value to store.
9555 */
9556IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9557{
9558 /* The lazy approach for now... */
9559 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9560 *pu128Dst = u128Value;
9561 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9562}
9563#endif
9564
9565
9566/**
9567 * Stores a data dqword, SSE aligned.
9568 *
9569 * @returns Strict VBox status code.
9570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9571 * @param iSegReg The index of the segment register to use for
9572 * this access. The base and limits are checked.
9573 * @param GCPtrMem The address of the guest memory.
9574 * @param u128Value The value to store.
9575 */
9576IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9577{
9578 /* The lazy approach for now... */
9579 if ( (GCPtrMem & 15)
9580 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9581 return iemRaiseGeneralProtectionFault0(pVCpu);
9582
9583 uint128_t *pu128Dst;
9584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9585 if (rc == VINF_SUCCESS)
9586 {
9587 *pu128Dst = u128Value;
9588 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9589 }
9590 return rc;
9591}
9592
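/*
 * Example (illustrative sketch, not compiled): the SSE-aligned store above raises
 * #GP(0) on a 16-byte misaligned address unless MXCSR.MM is set, so a MOVAPS-style
 * store can simply forward its strict status.  The helper name
 * iemExampleStoreXmmAligned is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExampleStoreXmmAligned(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t uSrc)
{
    /* An address ending in 0x0 stores normally; one ending in 0x8 yields #GP(0) while MXCSR.MM is clear. */
    return iemMemStoreDataU128AlignedSse(pVCpu, iSegReg, GCPtrMem, uSrc);
}
#endif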
9593
9594#ifdef IEM_WITH_SETJMP
9595/**
9596 * Stores a data dqword, SSE aligned, longjmp on error.
9597 *
9599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9600 * @param iSegReg The index of the segment register to use for
9601 * this access. The base and limits are checked.
9602 * @param GCPtrMem The address of the guest memory.
9603 * @param u128Value The value to store.
9604 */
9605DECL_NO_INLINE(IEM_STATIC, void)
9606iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9607{
9608 /* The lazy approach for now... */
9609 if ( (GCPtrMem & 15) == 0
9610 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9611 {
9612 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9613 *pu128Dst = u128Value;
9614 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9615 return;
9616 }
9617
9618 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9619 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9620}
9621#endif
9622
9623
9624/**
9625 * Stores a descriptor register (sgdt, sidt).
9626 *
9627 * @returns Strict VBox status code.
9628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9629 * @param cbLimit The limit.
9630 * @param GCPtrBase The base address.
9631 * @param iSegReg The index of the segment register to use for
9632 * this access. The base and limits are checked.
9633 * @param GCPtrMem The address of the guest memory.
9634 */
9635IEM_STATIC VBOXSTRICTRC
9636iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9637{
9638 /*
9639 * The SIDT and SGDT instructions actually store the data using two
9640 * independent writes. The instructions do not respond to operand-size prefixes.
9641 */
9642 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9643 if (rcStrict == VINF_SUCCESS)
9644 {
9645 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9646 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9647 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9648 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9649 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9650 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9651 else
9652 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9653 }
9654 return rcStrict;
9655}
9656
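/*
 * Example (illustrative sketch, not compiled): an SGDT implementation can feed the
 * current GDTR straight into the helper above, which takes care of the 16-bit limit
 * write, the width of the base write and the 286 high-byte quirk.  The helper name
 * iemExampleSgdt is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExampleSgdt(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    return iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iSegReg, GCPtrMem);
}
#endif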
9657
9658/**
9659 * Pushes a word onto the stack.
9660 *
9661 * @returns Strict VBox status code.
9662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9663 * @param u16Value The value to push.
9664 */
9665IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9666{
9667 /* Decrement the stack pointer. */
9668 uint64_t uNewRsp;
9669 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9670 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9671
9672 /* Write the word the lazy way. */
9673 uint16_t *pu16Dst;
9674 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9675 if (rc == VINF_SUCCESS)
9676 {
9677 *pu16Dst = u16Value;
9678 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9679 }
9680
9681 /* Commit the new RSP value unless an access handler made trouble. */
9682 if (rc == VINF_SUCCESS)
9683 pCtx->rsp = uNewRsp;
9684
9685 return rc;
9686}
9687
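/*
 * Example (illustrative sketch, not compiled): the push helpers only commit the new
 * RSP once the stack write has succeeded, so a faulting PUSH leaves RSP untouched.
 * A 16-bit "push segment register" body can therefore be a one-liner; the helper
 * name iemExamplePushSRegU16 is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExamplePushSRegU16(PVMCPU pVCpu, uint8_t iSegReg)
{
    return iemMemStackPushU16(pVCpu, iemSRegFetchU16(pVCpu, iSegReg));
}
#endif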
9688
9689/**
9690 * Pushes a dword onto the stack.
9691 *
9692 * @returns Strict VBox status code.
9693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9694 * @param u32Value The value to push.
9695 */
9696IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9697{
9698 /* Decrement the stack pointer. */
9699 uint64_t uNewRsp;
9700 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9701 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9702
9703 /* Write the dword the lazy way. */
9704 uint32_t *pu32Dst;
9705 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9706 if (rc == VINF_SUCCESS)
9707 {
9708 *pu32Dst = u32Value;
9709 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9710 }
9711
9712 /* Commit the new RSP value unless an access handler made trouble. */
9713 if (rc == VINF_SUCCESS)
9714 pCtx->rsp = uNewRsp;
9715
9716 return rc;
9717}
9718
9719
9720/**
9721 * Pushes a dword segment register value onto the stack.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9725 * @param u32Value The value to push.
9726 */
9727IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9728{
9729 /* Decrement the stack pointer. */
9730 uint64_t uNewRsp;
9731 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9732 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9733
9734 VBOXSTRICTRC rc;
9735 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9736 {
9737 /* The recompiler writes a full dword. */
9738 uint32_t *pu32Dst;
9739 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9740 if (rc == VINF_SUCCESS)
9741 {
9742 *pu32Dst = u32Value;
9743 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9744 }
9745 }
9746 else
9747 {
9748 /* The Intel docs talk about zero extending the selector register
9749 value. The actual Intel CPU tested here might be zero extending the value,
9750 but it still only writes the lower word... */
9751 /** @todo Test this on newer HW, on AMD, and in 64-bit mode. Also test what
9752 * happens when crossing a page boundary: is the high word checked
9753 * for write accessibility or not? Probably it is. What about segment limits?
9754 * This behavior appears to be shared with trap error codes.
9755 *
9756 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
9757 * on ancient hardware to find out when it actually changed.
9758 uint16_t *pu16Dst;
9759 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9760 if (rc == VINF_SUCCESS)
9761 {
9762 *pu16Dst = (uint16_t)u32Value;
9763 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9764 }
9765 }
9766
9767 /* Commit the new RSP value unless an access handler made trouble. */
9768 if (rc == VINF_SUCCESS)
9769 pCtx->rsp = uNewRsp;
9770
9771 return rc;
9772}
9773
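/*
 * Example (illustrative sketch, not compiled): pushing a segment register with a
 * 32-bit operand size via the helper above moves ESP by four, but per the note in
 * the code only the low word of the new stack slot is actually written (unless the
 * REM comparison mode is active).  The helper name iemExamplePushFs32 is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExamplePushFs32(PVMCPU pVCpu)
{
    return iemMemStackPushU32SReg(pVCpu, iemSRegFetchU16(pVCpu, X86_SREG_FS));
}
#endif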
9774
9775/**
9776 * Pushes a qword onto the stack.
9777 *
9778 * @returns Strict VBox status code.
9779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9780 * @param u64Value The value to push.
9781 */
9782IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9783{
9784 /* Decrement the stack pointer. */
9785 uint64_t uNewRsp;
9786 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9787 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9788
9789 /* Write the qword the lazy way. */
9790 uint64_t *pu64Dst;
9791 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9792 if (rc == VINF_SUCCESS)
9793 {
9794 *pu64Dst = u64Value;
9795 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9796 }
9797
9798 /* Commit the new RSP value unless an access handler made trouble. */
9799 if (rc == VINF_SUCCESS)
9800 pCtx->rsp = uNewRsp;
9801
9802 return rc;
9803}
9804
9805
9806/**
9807 * Pops a word from the stack.
9808 *
9809 * @returns Strict VBox status code.
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param pu16Value Where to store the popped value.
9812 */
9813IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9814{
9815 /* Increment the stack pointer. */
9816 uint64_t uNewRsp;
9817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9818 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9819
9820 /* Fetch the word the lazy way. */
9821 uint16_t const *pu16Src;
9822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9823 if (rc == VINF_SUCCESS)
9824 {
9825 *pu16Value = *pu16Src;
9826 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9827
9828 /* Commit the new RSP value. */
9829 if (rc == VINF_SUCCESS)
9830 pCtx->rsp = uNewRsp;
9831 }
9832
9833 return rc;
9834}
9835
9836
9837/**
9838 * Pops a dword from the stack.
9839 *
9840 * @returns Strict VBox status code.
9841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9842 * @param pu32Value Where to store the popped value.
9843 */
9844IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9845{
9846 /* Increment the stack pointer. */
9847 uint64_t uNewRsp;
9848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9849 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9850
9851 /* Fetch the dword the lazy way. */
9852 uint32_t const *pu32Src;
9853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9854 if (rc == VINF_SUCCESS)
9855 {
9856 *pu32Value = *pu32Src;
9857 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9858
9859 /* Commit the new RSP value. */
9860 if (rc == VINF_SUCCESS)
9861 pCtx->rsp = uNewRsp;
9862 }
9863
9864 return rc;
9865}
9866
9867
9868/**
9869 * Pops a qword from the stack.
9870 *
9871 * @returns Strict VBox status code.
9872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9873 * @param pu64Value Where to store the popped value.
9874 */
9875IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9876{
9877 /* Increment the stack pointer. */
9878 uint64_t uNewRsp;
9879 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9880 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9881
9882 /* Fetch the qword the lazy way. */
9883 uint64_t const *pu64Src;
9884 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9885 if (rc == VINF_SUCCESS)
9886 {
9887 *pu64Value = *pu64Src;
9888 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9889
9890 /* Commit the new RSP value. */
9891 if (rc == VINF_SUCCESS)
9892 pCtx->rsp = uNewRsp;
9893 }
9894
9895 return rc;
9896}
9897
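/*
 * Example (illustrative sketch, not compiled): the pop helpers mirror the pushes -
 * the value is read first and RSP is only committed once the read and unmap have
 * succeeded, so a faulting POP leaves RSP unchanged.  The helper name
 * iemExamplePopGRegU16 is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExamplePopGRegU16(PVMCPU pVCpu, uint8_t iGReg)
{
    uint16_t     u16Value;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
    if (rcStrict == VINF_SUCCESS)
        *iemGRegRefU16(pVCpu, iGReg) = u16Value;
    return rcStrict;
}
#endif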
9898
9899/**
9900 * Pushes a word onto the stack, using a temporary stack pointer.
9901 *
9902 * @returns Strict VBox status code.
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param u16Value The value to push.
9905 * @param pTmpRsp Pointer to the temporary stack pointer.
9906 */
9907IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9908{
9909 /* Decrement the stack pointer. */
9910 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9911 RTUINT64U NewRsp = *pTmpRsp;
9912 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9913
9914 /* Write the word the lazy way. */
9915 uint16_t *pu16Dst;
9916 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9917 if (rc == VINF_SUCCESS)
9918 {
9919 *pu16Dst = u16Value;
9920 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9921 }
9922
9923 /* Commit the new RSP value unless an access handler made trouble. */
9924 if (rc == VINF_SUCCESS)
9925 *pTmpRsp = NewRsp;
9926
9927 return rc;
9928}
9929
9930
9931/**
9932 * Pushes a dword onto the stack, using a temporary stack pointer.
9933 *
9934 * @returns Strict VBox status code.
9935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9936 * @param u32Value The value to push.
9937 * @param pTmpRsp Pointer to the temporary stack pointer.
9938 */
9939IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9940{
9941 /* Decrement the stack pointer. */
9942 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9943 RTUINT64U NewRsp = *pTmpRsp;
9944 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9945
9946 /* Write the dword the lazy way. */
9947 uint32_t *pu32Dst;
9948 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9949 if (rc == VINF_SUCCESS)
9950 {
9951 *pu32Dst = u32Value;
9952 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9953 }
9954
9955 /* Commit the new RSP value unless an access handler made trouble. */
9956 if (rc == VINF_SUCCESS)
9957 *pTmpRsp = NewRsp;
9958
9959 return rc;
9960}
9961
9962
9963/**
9964 * Pushes a qword onto the stack, using a temporary stack pointer.
9965 *
9966 * @returns Strict VBox status code.
9967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9968 * @param u64Value The value to push.
9969 * @param pTmpRsp Pointer to the temporary stack pointer.
9970 */
9971IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9972{
9973 /* Decrement the stack pointer. */
9974 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9975 RTUINT64U NewRsp = *pTmpRsp;
9976 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9977
9978 /* Write the qword the lazy way. */
9979 uint64_t *pu64Dst;
9980 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9981 if (rc == VINF_SUCCESS)
9982 {
9983 *pu64Dst = u64Value;
9984 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9985 }
9986
9987 /* Commit the new RSP value unless an access handler made trouble. */
9988 if (rc == VINF_SUCCESS)
9989 *pTmpRsp = NewRsp;
9990
9991 return rc;
9992}
9993
9994
9995/**
9996 * Pops a word from the stack, using a temporary stack pointer.
9997 *
9998 * @returns Strict VBox status code.
9999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10000 * @param pu16Value Where to store the popped value.
10001 * @param pTmpRsp Pointer to the temporary stack pointer.
10002 */
10003IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10004{
10005 /* Increment the stack pointer. */
10006 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10007 RTUINT64U NewRsp = *pTmpRsp;
10008 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10009
10010 /* Fetch the word the lazy way. */
10011 uint16_t const *pu16Src;
10012 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10013 if (rc == VINF_SUCCESS)
10014 {
10015 *pu16Value = *pu16Src;
10016 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10017
10018 /* Commit the new RSP value. */
10019 if (rc == VINF_SUCCESS)
10020 *pTmpRsp = NewRsp;
10021 }
10022
10023 return rc;
10024}
10025
10026
10027/**
10028 * Pops a dword from the stack, using a temporary stack pointer.
10029 *
10030 * @returns Strict VBox status code.
10031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10032 * @param pu32Value Where to store the popped value.
10033 * @param pTmpRsp Pointer to the temporary stack pointer.
10034 */
10035IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10036{
10037 /* Increment the stack pointer. */
10038 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10039 RTUINT64U NewRsp = *pTmpRsp;
10040 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10041
10042 /* Fetch the dword the lazy way. */
10043 uint32_t const *pu32Src;
10044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10045 if (rc == VINF_SUCCESS)
10046 {
10047 *pu32Value = *pu32Src;
10048 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10049
10050 /* Commit the new RSP value. */
10051 if (rc == VINF_SUCCESS)
10052 *pTmpRsp = NewRsp;
10053 }
10054
10055 return rc;
10056}
10057
10058
10059/**
10060 * Pops a qword from the stack, using a temporary stack pointer.
10061 *
10062 * @returns Strict VBox status code.
10063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10064 * @param pu64Value Where to store the popped value.
10065 * @param pTmpRsp Pointer to the temporary stack pointer.
10066 */
10067IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10068{
10069 /* Increment the stack pointer. */
10070 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10071 RTUINT64U NewRsp = *pTmpRsp;
10072 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10073
10074 /* Fetch the qword the lazy way. */
10075 uint64_t const *pu64Src;
10076 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10077 if (rcStrict == VINF_SUCCESS)
10078 {
10079 *pu64Value = *pu64Src;
10080 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10081
10082 /* Commit the new RSP value. */
10083 if (rcStrict == VINF_SUCCESS)
10084 *pTmpRsp = NewRsp;
10085 }
10086
10087 return rcStrict;
10088}
10089
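/*
 * Example (illustrative sketch, not compiled): the *Ex variants work on a caller
 * supplied RSP copy, letting multi-step operations pop or push several items and
 * commit the final RSP only when everything has succeeded.  The helper name
 * iemExamplePopTwoWords is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExamplePopTwoWords(PVMCPU pVCpu, uint16_t *pu16First, uint16_t *pu16Second)
{
    PCPUMCTX  pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, pu16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, pu16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;   /* commit only if both pops succeeded */
    return rcStrict;
}
#endif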
10090
10091/**
10092 * Begin a special stack push (used by interrupts, exceptions and such).
10093 *
10094 * This will raise \#SS or \#PF if appropriate.
10095 *
10096 * @returns Strict VBox status code.
10097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10098 * @param cbMem The number of bytes to push onto the stack.
10099 * @param ppvMem Where to return the pointer to the stack memory.
10100 * As with the other memory functions this could be
10101 * direct access or bounce buffered access, so
10102 * don't commit the register until the commit call
10103 * succeeds.
10104 * @param puNewRsp Where to return the new RSP value. This must be
10105 * passed unchanged to
10106 * iemMemStackPushCommitSpecial().
10107 */
10108IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10109{
10110 Assert(cbMem < UINT8_MAX);
10111 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10112 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10113 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10114}
10115
10116
10117/**
10118 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10119 *
10120 * This will update the rSP.
10121 *
10122 * @returns Strict VBox status code.
10123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10124 * @param pvMem The pointer returned by
10125 * iemMemStackPushBeginSpecial().
10126 * @param uNewRsp The new RSP value returned by
10127 * iemMemStackPushBeginSpecial().
10128 */
10129IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10130{
10131 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10132 if (rcStrict == VINF_SUCCESS)
10133 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10134 return rcStrict;
10135}
10136
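/*
 * Example (illustrative sketch, not compiled): the special push API is intended for
 * exception and interrupt delivery where several fields are written as one block -
 * map the whole area, fill it in, then commit memory and RSP together.  The 8-byte
 * frame layout and the helper name iemExamplePushTwoDwordFrame are hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExamplePushTwoDwordFrame(PVMCPU pVCpu, uint32_t uErr, uint32_t uEip)
{
    uint32_t    *pau32Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau32Frame[0] = uErr;   /* lowest address, i.e. what RSP will point at */
    pau32Frame[1] = uEip;
    return iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
}
#endif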
10137
10138/**
10139 * Begin a special stack pop (used by iret, retf and such).
10140 *
10141 * This will raise \#SS or \#PF if appropriate.
10142 *
10143 * @returns Strict VBox status code.
10144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10145 * @param cbMem The number of bytes to pop from the stack.
10146 * @param ppvMem Where to return the pointer to the stack memory.
10147 * @param puNewRsp Where to return the new RSP value. This must be
10148 * assigned to CPUMCTX::rsp manually some time
10149 * after iemMemStackPopDoneSpecial() has been
10150 * called.
10151 */
10152IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10153{
10154 Assert(cbMem < UINT8_MAX);
10155 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10156 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10157 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10158}
10159
10160
10161/**
10162 * Continue a special stack pop (used by iret and retf).
10163 *
10164 * This will raise \#SS or \#PF if appropriate.
10165 *
10166 * @returns Strict VBox status code.
10167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10168 * @param cbMem The number of bytes to pop from the stack.
10169 * @param ppvMem Where to return the pointer to the stack memory.
10170 * @param puNewRsp Where to return the new RSP value. This must be
10171 * assigned to CPUMCTX::rsp manually some time
10172 * after iemMemStackPopDoneSpecial() has been
10173 * called.
10174 */
10175IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10176{
10177 Assert(cbMem < UINT8_MAX);
10178 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10179 RTUINT64U NewRsp;
10180 NewRsp.u = *puNewRsp;
10181 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10182 *puNewRsp = NewRsp.u;
10183 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10184}
10185
10186
10187/**
10188 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10189 * iemMemStackPopContinueSpecial).
10190 *
10191 * The caller will manually commit the rSP.
10192 *
10193 * @returns Strict VBox status code.
10194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10195 * @param pvMem The pointer returned by
10196 * iemMemStackPopBeginSpecial() or
10197 * iemMemStackPopContinueSpecial().
10198 */
10199IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10200{
10201 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10202}
10203
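/*
 * Example (illustrative sketch, not compiled): an IRET-style sequence uses the
 * special pop API to read the return frame without touching RSP, and assigns the
 * new RSP only after everything has been validated.  The 6-byte real/16-bit style
 * frame and the helper name iemExamplePeekIretFrame16 are hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExamplePeekIretFrame16(PVMCPU pVCpu, uint16_t *puIp, uint16_t *puCs, uint16_t *puFlags)
{
    uint16_t const *pau16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puIp    = pau16Frame[0];
    *puCs    = pau16Frame[1];
    *puFlags = pau16Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau16Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* the caller commits RSP manually */
    return rcStrict;
}
#endif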
10204
10205/**
10206 * Fetches a system table byte.
10207 *
10208 * @returns Strict VBox status code.
10209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10210 * @param pbDst Where to return the byte.
10211 * @param iSegReg The index of the segment register to use for
10212 * this access. The base and limits are checked.
10213 * @param GCPtrMem The address of the guest memory.
10214 */
10215IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10216{
10217 /* The lazy approach for now... */
10218 uint8_t const *pbSrc;
10219 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10220 if (rc == VINF_SUCCESS)
10221 {
10222 *pbDst = *pbSrc;
10223 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10224 }
10225 return rc;
10226}
10227
10228
10229/**
10230 * Fetches a system table word.
10231 *
10232 * @returns Strict VBox status code.
10233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10234 * @param pu16Dst Where to return the word.
10235 * @param iSegReg The index of the segment register to use for
10236 * this access. The base and limits are checked.
10237 * @param GCPtrMem The address of the guest memory.
10238 */
10239IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10240{
10241 /* The lazy approach for now... */
10242 uint16_t const *pu16Src;
10243 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10244 if (rc == VINF_SUCCESS)
10245 {
10246 *pu16Dst = *pu16Src;
10247 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10248 }
10249 return rc;
10250}
10251
10252
10253/**
10254 * Fetches a system table dword.
10255 *
10256 * @returns Strict VBox status code.
10257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10258 * @param pu32Dst Where to return the dword.
10259 * @param iSegReg The index of the segment register to use for
10260 * this access. The base and limits are checked.
10261 * @param GCPtrMem The address of the guest memory.
10262 */
10263IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10264{
10265 /* The lazy approach for now... */
10266 uint32_t const *pu32Src;
10267 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10268 if (rc == VINF_SUCCESS)
10269 {
10270 *pu32Dst = *pu32Src;
10271 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10272 }
10273 return rc;
10274}
10275
10276
10277/**
10278 * Fetches a system table qword.
10279 *
10280 * @returns Strict VBox status code.
10281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10282 * @param pu64Dst Where to return the qword.
10283 * @param iSegReg The index of the segment register to use for
10284 * this access. The base and limits are checked.
10285 * @param GCPtrMem The address of the guest memory.
10286 */
10287IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10288{
10289 /* The lazy approach for now... */
10290 uint64_t const *pu64Src;
10291 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10292 if (rc == VINF_SUCCESS)
10293 {
10294 *pu64Dst = *pu64Src;
10295 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10296 }
10297 return rc;
10298}
10299
10300
10301/**
10302 * Fetches a descriptor table entry with caller specified error code.
10303 *
10304 * @returns Strict VBox status code.
10305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10306 * @param pDesc Where to return the descriptor table entry.
10307 * @param uSel The selector which table entry to fetch.
10308 * @param uXcpt The exception to raise on table lookup error.
10309 * @param uErrorCode The error code associated with the exception.
10310 */
10311IEM_STATIC VBOXSTRICTRC
10312iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10313{
10314 AssertPtr(pDesc);
10315 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10316
10317 /** @todo did the 286 require all 8 bytes to be accessible? */
10318 /*
10319 * Get the selector table base and check bounds.
10320 */
10321 RTGCPTR GCPtrBase;
10322 if (uSel & X86_SEL_LDT)
10323 {
10324 if ( !pCtx->ldtr.Attr.n.u1Present
10325 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10326 {
10327 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10328 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10329 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10330 uErrorCode, 0);
10331 }
10332
10333 Assert(pCtx->ldtr.Attr.n.u1Present);
10334 GCPtrBase = pCtx->ldtr.u64Base;
10335 }
10336 else
10337 {
10338 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10339 {
10340 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10341 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10342 uErrorCode, 0);
10343 }
10344 GCPtrBase = pCtx->gdtr.pGdt;
10345 }
10346
10347 /*
10348 * Read the legacy descriptor and maybe the long mode extensions if
10349 * required.
10350 */
10351 VBOXSTRICTRC rcStrict;
10352 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10353 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10354 else
10355 {
10356 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10357 if (rcStrict == VINF_SUCCESS)
10358 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10359 if (rcStrict == VINF_SUCCESS)
10360 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10361 if (rcStrict == VINF_SUCCESS)
10362 pDesc->Legacy.au16[3] = 0;
10363 else
10364 return rcStrict;
10365 }
10366
10367 if (rcStrict == VINF_SUCCESS)
10368 {
10369 if ( !IEM_IS_LONG_MODE(pVCpu)
10370 || pDesc->Legacy.Gen.u1DescType)
10371 pDesc->Long.au64[1] = 0;
10372 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10373 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10374 else
10375 {
10376 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10377 /** @todo is this the right exception? */
10378 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10379 }
10380 }
10381 return rcStrict;
10382}
10383
10384
10385/**
10386 * Fetches a descriptor table entry.
10387 *
10388 * @returns Strict VBox status code.
10389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10390 * @param pDesc Where to return the descriptor table entry.
10391 * @param uSel The selector which table entry to fetch.
10392 * @param uXcpt The exception to raise on table lookup error.
10393 */
10394IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10395{
10396 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10397}
10398
10399
10400/**
10401 * Fakes a long mode stack selector for SS = 0.
10402 *
10403 * @param pDescSs Where to return the fake stack descriptor.
10404 * @param uDpl The DPL we want.
10405 */
10406IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10407{
10408 pDescSs->Long.au64[0] = 0;
10409 pDescSs->Long.au64[1] = 0;
10410 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10411 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10412 pDescSs->Long.Gen.u2Dpl = uDpl;
10413 pDescSs->Long.Gen.u1Present = 1;
10414 pDescSs->Long.Gen.u1Long = 1;
10415}
10416
10417
10418/**
10419 * Marks the selector descriptor as accessed (only non-system descriptors).
10420 *
10421 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10422 * will therefore skip the limit checks.
10423 *
10424 * @returns Strict VBox status code.
10425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10426 * @param uSel The selector.
10427 */
10428IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10429{
10430 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10431
10432 /*
10433 * Get the selector table base and calculate the entry address.
10434 */
10435 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10436 ? pCtx->ldtr.u64Base
10437 : pCtx->gdtr.pGdt;
10438 GCPtr += uSel & X86_SEL_MASK;
10439
10440 /*
10441 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10442 * ugly stuff to avoid this. This makes sure the access is atomic and
10443 * more or less removes any question about 8-bit vs 32-bit accesses.
10444 */
10445 VBOXSTRICTRC rcStrict;
10446 uint32_t volatile *pu32;
10447 if ((GCPtr & 3) == 0)
10448 {
10449 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
10450 GCPtr += 2 + 2;
10451 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10452 if (rcStrict != VINF_SUCCESS)
10453 return rcStrict;
10454 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10455 }
10456 else
10457 {
10458 /* The misaligned GDT/LDT case, map the whole thing. */
10459 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10460 if (rcStrict != VINF_SUCCESS)
10461 return rcStrict;
10462 switch ((uintptr_t)pu32 & 3)
10463 {
10464 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10465 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10466 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10467 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10468 }
10469 }
10470
10471 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10472}
10473
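/*
 * Example (illustrative sketch, not compiled): how a selector load might combine
 * the descriptor fetch with the accessed bit update above.  The checks are reduced
 * to the bare minimum and the helper name iemExampleLoadAndMarkAccessed is
 * hypothetical; real code raises #GP with the selector in the error code.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExampleLoadAndMarkAccessed(PVMCPU pVCpu, uint16_t uSel, PIEMSELDESC pDesc)
{
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!pDesc->Legacy.Gen.u1DescType)  /* only code/data descriptors have an accessed bit */
        return iemRaiseGeneralProtectionFault0(pVCpu);
    if (!(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);  /* sets descriptor bit 40 atomically */
        if (rcStrict == VINF_SUCCESS)
            pDesc->Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }
    return rcStrict;
}
#endif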
10474/** @} */
10475
10476
10477/*
10478 * Include the C/C++ implementation of instruction.
10479 */
10480#include "IEMAllCImpl.cpp.h"
10481
10482
10483
10484/** @name "Microcode" macros.
10485 *
10486 * The idea is that we should be able to use the same code to interpret
10487 * instructions as well as to recompile them. Hence this obfuscation.
10488 *
10489 * @{
10490 */
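/*
 * Example (illustrative sketch, not compiled): a decoder function body written with
 * the microcode macros defined below.  The effective address is taken as a parameter
 * to keep the sketch self-contained; real decoder functions compute it with further
 * IEM_MC_* macros.  The helper name iemExampleOpLoadAxU16 is hypothetical.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemExampleOpLoadAxU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrEffSrc)
{
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_MEM_U16(u16Value, iSegReg, GCPtrEffSrc);   /* returns or longjmps on a fault */
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif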
10491#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10492#define IEM_MC_END() }
10493#define IEM_MC_PAUSE() do {} while (0)
10494#define IEM_MC_CONTINUE() do {} while (0)
10495
10496/** Internal macro. */
10497#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10498 do \
10499 { \
10500 VBOXSTRICTRC rcStrict2 = a_Expr; \
10501 if (rcStrict2 != VINF_SUCCESS) \
10502 return rcStrict2; \
10503 } while (0)
10504
10505
10506#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10507#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10508#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10509#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10510#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10511#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10512#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10513#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10514#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10515 do { \
10516 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10517 return iemRaiseDeviceNotAvailable(pVCpu); \
10518 } while (0)
10519#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10520 do { \
10521 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10522 return iemRaiseDeviceNotAvailable(pVCpu); \
10523 } while (0)
10524#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10525 do { \
10526 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10527 return iemRaiseMathFault(pVCpu); \
10528 } while (0)
10529#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10530 do { \
10531 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10532 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10533 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10534 return iemRaiseUndefinedOpcode(pVCpu); \
10535 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10536 return iemRaiseDeviceNotAvailable(pVCpu); \
10537 } while (0)
10538#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10539 do { \
10540 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10541 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10542 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10543 return iemRaiseUndefinedOpcode(pVCpu); \
10544 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10545 return iemRaiseDeviceNotAvailable(pVCpu); \
10546 } while (0)
10547#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10548 do { \
10549 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10550 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10551 return iemRaiseUndefinedOpcode(pVCpu); \
10552 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10553 return iemRaiseDeviceNotAvailable(pVCpu); \
10554 } while (0)
10555#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10556 do { \
10557 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10558 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10559 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10560 return iemRaiseUndefinedOpcode(pVCpu); \
10561 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10562 return iemRaiseDeviceNotAvailable(pVCpu); \
10563 } while (0)
10564#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10565 do { \
10566 if (pVCpu->iem.s.uCpl != 0) \
10567 return iemRaiseGeneralProtectionFault0(pVCpu); \
10568 } while (0)
10569#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10570 do { \
10571 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10572 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10573 } while (0)
10574
10575
10576#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10577#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10578#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10579#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10580#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10581#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10582#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10583 uint32_t a_Name; \
10584 uint32_t *a_pName = &a_Name
10585#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10586 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10587
10588#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10589#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10590
10591#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10592#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10593#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10594#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10595#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10596#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10597#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10598#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10599#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10600#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10601#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10602#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10603#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10604#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10605#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10606#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10607#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10608#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10609#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10610#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10611#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10612#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10613#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10614#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10615#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10616#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10617#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10618#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10619#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10620/** @note Not for IOPL or IF testing or modification. */
10621#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10622#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10623#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10624#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10625
10626#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10627#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10628#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10629#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10630#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10631#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10632#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10633#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10634#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10635#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10636#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10637 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10638
10639#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10640#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10641/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10642 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10643#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10644#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10645/** @note Not for IOPL or IF testing or modification. */
10646#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10647
10648#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10649#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10650#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10651 do { \
10652 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10653 *pu32Reg += (a_u32Value); \
10654 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10655 } while (0)
10656#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10657
10658#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10659#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10660#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10661 do { \
10662 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10663 *pu32Reg -= (a_u32Value); \
10664 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10665 } while (0)
10666#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10667#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10668
10669#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10670#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10671#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10672#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10673#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10674#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10675#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10676
10677#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10678#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10679#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10680#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10681
10682#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10683#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10684#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10685
10686#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10687#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10688#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10689
10690#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10691#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10692#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10693
10694#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10695#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10696#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10697
10698#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10699
10700#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10701
10702#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10703#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10704#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10705 do { \
10706 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10707 *pu32Reg &= (a_u32Value); \
10708 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10709 } while (0)
10710#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10711
10712#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10713#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10714#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10715 do { \
10716 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10717 *pu32Reg |= (a_u32Value); \
10718 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10719 } while (0)
10720#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10721
10722
10723/** @note Not for IOPL or IF modification. */
10724#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10725/** @note Not for IOPL or IF modification. */
10726#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10727/** @note Not for IOPL or IF modification. */
10728#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10729
10730#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10731
10732
10733#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10734 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10735#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10736 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10737#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10738 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10739#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10740 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10741#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10742 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10743#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10744 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10745#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10746 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10747
10748#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10749 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10750#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10751 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10752#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10753 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10754#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10755 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10756#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10757 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10758#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10759 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10760 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10761 } while (0)
10762#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10763 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10764 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10765 } while (0)
10766#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10767 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10768#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10769 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10770#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10771 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10772#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10773 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10774 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10775
10776#ifndef IEM_WITH_SETJMP
10777# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10779# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10781# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10783#else
10784# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10785 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10786# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10787 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10788# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10789 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10790#endif
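/* Both flavours above do the same work: without IEM_WITH_SETJMP each fetcher
 * returns a VBox strict status code and IEM_MC_RETURN_ON_FAILURE() bails out
 * of the instruction body on failure, while with IEM_WITH_SETJMP the *Jmp
 * fetchers longjmp out on failure and the macros reduce to plain assignments.
 * Illustrative expansion of IEM_MC_FETCH_MEM_U8(u8Val, X86_SREG_DS, GCPtrMem),
 * where u8Val and GCPtrMem are hypothetical locals:
 *
 *     // status-code mode:
 *     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Val, X86_SREG_DS, GCPtrMem));
 *     // setjmp mode:
 *     u8Val = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 */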
10791
10792#ifndef IEM_WITH_SETJMP
10793# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10795# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10797# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10799#else
10800# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10801 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10802# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10803 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10804# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10805 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10806#endif
10807
10808#ifndef IEM_WITH_SETJMP
10809# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10811# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10813# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10815#else
10816# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10817 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10818# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10819 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10820# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10821 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10822#endif
10823
10824#ifdef SOME_UNUSED_FUNCTION
10825# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10827#endif
10828
10829#ifndef IEM_WITH_SETJMP
10830# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10832# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10834# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10836# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10837 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10838#else
10839# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10840 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10841# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10842 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10843# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10844 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10845# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10846 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10847#endif
10848
10849#ifndef IEM_WITH_SETJMP
10850# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10852# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10853 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10854# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10855 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10856#else
10857# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10858 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10859# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10860 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10861# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10862 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10863#endif
10864
10865#ifndef IEM_WITH_SETJMP
10866# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10867 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10868# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10870#else
10871# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10872 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10873# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10874 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10875#endif
10876
10877
10878
10879#ifndef IEM_WITH_SETJMP
10880# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10881 do { \
10882 uint8_t u8Tmp; \
10883 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10884 (a_u16Dst) = u8Tmp; \
10885 } while (0)
10886# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10887 do { \
10888 uint8_t u8Tmp; \
10889 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10890 (a_u32Dst) = u8Tmp; \
10891 } while (0)
10892# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10893 do { \
10894 uint8_t u8Tmp; \
10895 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10896 (a_u64Dst) = u8Tmp; \
10897 } while (0)
10898# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10899 do { \
10900 uint16_t u16Tmp; \
10901 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10902 (a_u32Dst) = u16Tmp; \
10903 } while (0)
10904# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10905 do { \
10906 uint16_t u16Tmp; \
10907 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10908 (a_u64Dst) = u16Tmp; \
10909 } while (0)
10910# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10911 do { \
10912 uint32_t u32Tmp; \
10913 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10914 (a_u64Dst) = u32Tmp; \
10915 } while (0)
10916#else /* IEM_WITH_SETJMP */
10917# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10918 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10919# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10920 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10921# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10922 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10923# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10924 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10925# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10926 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10927# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10928 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10929#endif /* IEM_WITH_SETJMP */
10930
10931#ifndef IEM_WITH_SETJMP
10932# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10933 do { \
10934 uint8_t u8Tmp; \
10935 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10936 (a_u16Dst) = (int8_t)u8Tmp; \
10937 } while (0)
10938# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10939 do { \
10940 uint8_t u8Tmp; \
10941 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10942 (a_u32Dst) = (int8_t)u8Tmp; \
10943 } while (0)
10944# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10945 do { \
10946 uint8_t u8Tmp; \
10947 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10948 (a_u64Dst) = (int8_t)u8Tmp; \
10949 } while (0)
10950# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10951 do { \
10952 uint16_t u16Tmp; \
10953 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10954 (a_u32Dst) = (int16_t)u16Tmp; \
10955 } while (0)
10956# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10957 do { \
10958 uint16_t u16Tmp; \
10959 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10960 (a_u64Dst) = (int16_t)u16Tmp; \
10961 } while (0)
10962# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10963 do { \
10964 uint32_t u32Tmp; \
10965 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10966 (a_u64Dst) = (int32_t)u32Tmp; \
10967 } while (0)
10968#else /* IEM_WITH_SETJMP */
10969# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10970 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10971# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10972 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10973# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10974 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10975# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10976 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10977# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10978 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10979# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10980 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10981#endif /* IEM_WITH_SETJMP */
10982
10983#ifndef IEM_WITH_SETJMP
10984# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10985 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10986# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10987 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10988# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10989 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10990# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10991 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10992#else
10993# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10994 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10995# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10996 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10997# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10998 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10999# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11000 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11001#endif
11002
11003#ifndef IEM_WITH_SETJMP
11004# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11005 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11006# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11007 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11008# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11009 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11010# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11011 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11012#else
11013# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11014 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11015# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11016 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11017# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11018 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11019# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11020 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11021#endif
11022
11023#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11024#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11025#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11026#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11027#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11028#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11029#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11030 do { \
11031 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11032 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11033 } while (0)
11034
11035#ifndef IEM_WITH_SETJMP
11036# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11037 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11038# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11039 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11040#else
11041# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11042 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11043# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11044 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11045#endif
11046
11047
11048#define IEM_MC_PUSH_U16(a_u16Value) \
11049 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11050#define IEM_MC_PUSH_U32(a_u32Value) \
11051 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11052#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11053 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11054#define IEM_MC_PUSH_U64(a_u64Value) \
11055 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11056
11057#define IEM_MC_POP_U16(a_pu16Value) \
11058 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11059#define IEM_MC_POP_U32(a_pu32Value) \
11060 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11061#define IEM_MC_POP_U64(a_pu64Value) \
11062 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11063
11064/** Maps guest memory for direct or bounce buffered access.
11065 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11066 * @remarks May return.
11067 */
11068#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11069 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11070
11071/** Maps guest memory for direct or bounce buffered access.
11072 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11073 * @remarks May return.
11074 */
11075#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11076 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11077
11078/** Commits the memory and unmaps the guest memory.
11079 * @remarks May return.
11080 */
11081#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11082 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11083
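/* Typical usage pattern for the mapping macros above, as a hedged sketch
 * (pu32Dst, u32Src and GCPtrEffDst are hypothetical locals; IEM_ACCESS_DATA_RW
 * is the usual read-write access flag used elsewhere in IEM):
 *
 *     uint32_t *pu32Dst;
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     *pu32Dst |= u32Src;                            // operate on the mapped guest memory
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */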
11084/** Commits the memory and unmaps the guest memory unless the FPU status word
11085 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11086 * that would cause FLD not to store.
11087 *
11088 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11089 * store, while \#P will not.
11090 *
11091 * @remarks May in theory return - for now.
11092 */
11093#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11094 do { \
11095 if ( !(a_u16FSW & X86_FSW_ES) \
11096 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11097 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11098 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11099 } while (0)
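/* Worked example of the condition above (illustrative FSW/FCW values): the
 * exception flag bits in the FSW line up with the mask bits in the FCW, so
 * "(pending U/O/I flags) & ~(FCW masks)" is non-zero exactly when an unmasked
 * exception is outstanding.
 *
 *     FSW = X86_FSW_ES | X86_FSW_UE, FCW masks everything (X86_FCW_MASK_ALL set)
 *         -> UE & ~mask == 0, so the commit and unmap is performed (masked #U).
 *     FSW = X86_FSW_ES | X86_FSW_IE, FCW with X86_FCW_IM clear
 *         -> IE & ~mask != 0, so the commit is skipped (unmasked invalid op).
 */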
11100
11101/** Calculate efficient address from R/M. */
11102#ifndef IEM_WITH_SETJMP
11103# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11104 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11105#else
11106# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11107 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11108#endif
11109
11110#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11111#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11112#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11113#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11114#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11115#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11116#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11117
11118/**
11119 * Defers the rest of the instruction emulation to a C implementation routine
11120 * and returns, only taking the standard parameters.
11121 *
11122 * @param a_pfnCImpl The pointer to the C routine.
11123 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11124 */
11125#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11126
11127/**
11128 * Defers the rest of the instruction emulation to a C implementation routine and
11129 * returns, taking one argument in addition to the standard ones.
11130 *
11131 * @param a_pfnCImpl The pointer to the C routine.
11132 * @param a0 The argument.
11133 */
11134#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11135
11136/**
11137 * Defers the rest of the instruction emulation to a C implementation routine
11138 * and returns, taking two arguments in addition to the standard ones.
11139 *
11140 * @param a_pfnCImpl The pointer to the C routine.
11141 * @param a0 The first extra argument.
11142 * @param a1 The second extra argument.
11143 */
11144#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11145
11146/**
11147 * Defers the rest of the instruction emulation to a C implementation routine
11148 * and returns, taking three arguments in addition to the standard ones.
11149 *
11150 * @param a_pfnCImpl The pointer to the C routine.
11151 * @param a0 The first extra argument.
11152 * @param a1 The second extra argument.
11153 * @param a2 The third extra argument.
11154 */
11155#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11156
11157/**
11158 * Defers the rest of the instruction emulation to a C implementation routine
11159 * and returns, taking four arguments in addition to the standard ones.
11160 *
11161 * @param a_pfnCImpl The pointer to the C routine.
11162 * @param a0 The first extra argument.
11163 * @param a1 The second extra argument.
11164 * @param a2 The third extra argument.
11165 * @param a3 The fourth extra argument.
11166 */
11167#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11168
11169/**
11170 * Defers the rest of the instruction emulation to a C implementation routine
11171 * and returns, taking five arguments in addition to the standard ones.
11172 *
11173 * @param a_pfnCImpl The pointer to the C routine.
11174 * @param a0 The first extra argument.
11175 * @param a1 The second extra argument.
11176 * @param a2 The third extra argument.
11177 * @param a3 The fourth extra argument.
11178 * @param a4 The fifth extra argument.
11179 */
11180#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11181
11182/**
11183 * Defers the entire instruction emulation to a C implementation routine and
11184 * returns, only taking the standard parameters.
11185 *
11186 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11187 *
11188 * @param a_pfnCImpl The pointer to the C routine.
11189 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11190 */
11191#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11192
11193/**
11194 * Defers the entire instruction emulation to a C implementation routine and
11195 * returns, taking one argument in addition to the standard ones.
11196 *
11197 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11198 *
11199 * @param a_pfnCImpl The pointer to the C routine.
11200 * @param a0 The argument.
11201 */
11202#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11203
11204/**
11205 * Defers the entire instruction emulation to a C implementation routine and
11206 * returns, taking two arguments in addition to the standard ones.
11207 *
11208 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11209 *
11210 * @param a_pfnCImpl The pointer to the C routine.
11211 * @param a0 The first extra argument.
11212 * @param a1 The second extra argument.
11213 */
11214#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11215
11216/**
11217 * Defers the entire instruction emulation to a C implementation routine and
11218 * returns, taking three arguments in addition to the standard ones.
11219 *
11220 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11221 *
11222 * @param a_pfnCImpl The pointer to the C routine.
11223 * @param a0 The first extra argument.
11224 * @param a1 The second extra argument.
11225 * @param a2 The third extra argument.
11226 */
11227#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11228
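/* Hedged usage sketch for the CIMPL macros above.  FNIEMOP_DEF is the usual
 * opcode-decoder signature macro and iemCImpl_hypotheticalHelper is a made-up
 * C implementation routine; neither is defined in this excerpt:
 *
 *     FNIEMOP_DEF(iemOp_hypothetical)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_hypotheticalHelper, pVCpu->iem.s.enmEffOpSize);
 *     }
 *
 * The IEM_MC_CALL_CIMPL_* forms are used the same way from inside an
 * IEM_MC_BEGIN/IEM_MC_END block, and likewise return to the caller.
 */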
11229/**
11230 * Calls a FPU assembly implementation taking one visible argument.
11231 *
11232 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11233 * @param a0 The first extra argument.
11234 */
11235#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11236 do { \
11237 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11238 } while (0)
11239
11240/**
11241 * Calls a FPU assembly implementation taking two visible arguments.
11242 *
11243 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11244 * @param a0 The first extra argument.
11245 * @param a1 The second extra argument.
11246 */
11247#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11248 do { \
11249 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11250 } while (0)
11251
11252/**
11253 * Calls a FPU assembly implementation taking three visible arguments.
11254 *
11255 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11256 * @param a0 The first extra argument.
11257 * @param a1 The second extra argument.
11258 * @param a2 The third extra argument.
11259 */
11260#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11261 do { \
11262 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11263 } while (0)
11264
11265#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11266 do { \
11267 (a_FpuData).FSW = (a_FSW); \
11268 (a_FpuData).r80Result = *(a_pr80Value); \
11269 } while (0)
11270
11271/** Pushes FPU result onto the stack. */
11272#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11273 iemFpuPushResult(pVCpu, &a_FpuData)
11274/** Pushes FPU result onto the stack and sets the FPUDP. */
11275#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11276 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11277
11278/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11279#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11280 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11281
11282/** Stores FPU result in a stack register. */
11283#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11284 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11285/** Stores FPU result in a stack register and pops the stack. */
11286#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11287 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11288/** Stores FPU result in a stack register and sets the FPUDP. */
11289#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11290 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11291/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11292 * stack. */
11293#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11294 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11295
11296/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11297#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11298 iemFpuUpdateOpcodeAndIp(pVCpu)
11299/** Free a stack register (for FFREE and FFREEP). */
11300#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11301 iemFpuStackFree(pVCpu, a_iStReg)
11302/** Increment the FPU stack pointer. */
11303#define IEM_MC_FPU_STACK_INC_TOP() \
11304 iemFpuStackIncTop(pVCpu)
11305/** Decrement the FPU stack pointer. */
11306#define IEM_MC_FPU_STACK_DEC_TOP() \
11307 iemFpuStackDecTop(pVCpu)
11308
11309/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11310#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11311 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11312/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11313#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11314 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11315/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11316#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11317 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11318/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11319#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11320 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11321/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11322 * stack. */
11323#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11324 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11325/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11326#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11327 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11328
11329/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11330#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11331 iemFpuStackUnderflow(pVCpu, a_iStDst)
11332/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11333 * stack. */
11334#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11335 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11336/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11337 * FPUDS. */
11338#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11339 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11340/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11341 * FPUDS. Pops stack. */
11342#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11343 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11344/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11345 * stack twice. */
11346#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11347 iemFpuStackUnderflowThenPopPop(pVCpu)
11348/** Raises a FPU stack underflow exception for an instruction pushing a result
11349 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11350#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11351 iemFpuStackPushUnderflow(pVCpu)
11352/** Raises a FPU stack underflow exception for an instruction pushing a result
11353 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11354#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11355 iemFpuStackPushUnderflowTwo(pVCpu)
11356
11357/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11358 * FPUIP, FPUCS and FOP. */
11359#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11360 iemFpuStackPushOverflow(pVCpu)
11361/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11362 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11363#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11364 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11365/** Prepares for using the FPU state.
11366 * Ensures that we can use the host FPU in the current context (RC+R0).
11367 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11368#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11369/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11370#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11371/** Actualizes the guest FPU state so it can be accessed and modified. */
11372#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11373
11374/** Prepares for using the SSE state.
11375 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11376 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11377#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11378/** Actualizes the guest XMM0..15 register state for read-only access. */
11379#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11380/** Actualizes the guest XMM0..15 register state for read-write access. */
11381#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11382
11383/**
11384 * Calls a MMX assembly implementation taking two visible arguments.
11385 *
11386 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11387 * @param a0 The first extra argument.
11388 * @param a1 The second extra argument.
11389 */
11390#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11391 do { \
11392 IEM_MC_PREPARE_FPU_USAGE(); \
11393 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11394 } while (0)
11395
11396/**
11397 * Calls a MMX assembly implementation taking three visible arguments.
11398 *
11399 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11400 * @param a0 The first extra argument.
11401 * @param a1 The second extra argument.
11402 * @param a2 The third extra argument.
11403 */
11404#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11405 do { \
11406 IEM_MC_PREPARE_FPU_USAGE(); \
11407 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11408 } while (0)
11409
11410
11411/**
11412 * Calls a SSE assembly implementation taking two visible arguments.
11413 *
11414 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
11415 * @param a0 The first extra argument.
11416 * @param a1 The second extra argument.
11417 */
11418#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11419 do { \
11420 IEM_MC_PREPARE_SSE_USAGE(); \
11421 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11422 } while (0)
11423
11424/**
11425 * Calls a SSE assembly implementation taking three visible arguments.
11426 *
11427 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
11428 * @param a0 The first extra argument.
11429 * @param a1 The second extra argument.
11430 * @param a2 The third extra argument.
11431 */
11432#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11433 do { \
11434 IEM_MC_PREPARE_SSE_USAGE(); \
11435 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11436 } while (0)
11437
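/* Illustrative call sequence for a two-operand SSE instruction body, using the
 * register-reference and call macros defined above.  pfnAImpl, bRm and the
 * locals are assumed to come from the surrounding decoder code, and the
 * ModR/M field constants are the usual x86.h ones:
 *
 *     uint128_t       *pDst;
 *     uint128_t const *pSrc;
 *     IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *     IEM_MC_CALL_SSE_AIMPL_2(pfnAImpl, pDst, pSrc);
 */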
11438/** @note Not for IOPL or IF testing. */
11439#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11440/** @note Not for IOPL or IF testing. */
11441#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11442/** @note Not for IOPL or IF testing. */
11443#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11444/** @note Not for IOPL or IF testing. */
11445#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11446/** @note Not for IOPL or IF testing. */
11447#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11448 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11449 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11450/** @note Not for IOPL or IF testing. */
11451#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11452 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11453 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11454/** @note Not for IOPL or IF testing. */
11455#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11456 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11457 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11458 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11459/** @note Not for IOPL or IF testing. */
11460#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11461 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11462 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11463 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11464#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11465#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11466#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11467/** @note Not for IOPL or IF testing. */
11468#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11469 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11470 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11471/** @note Not for IOPL or IF testing. */
11472#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11473 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11474 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11475/** @note Not for IOPL or IF testing. */
11476#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11477 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11478 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11479/** @note Not for IOPL or IF testing. */
11480#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11481 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11482 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11483/** @note Not for IOPL or IF testing. */
11484#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11485 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11486 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11487/** @note Not for IOPL or IF testing. */
11488#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11489 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11490 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11491#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11492#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11493
11494#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11495 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11496#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11497 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11498#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11499 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11500#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11501 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11502#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11503 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11504#define IEM_MC_IF_FCW_IM() \
11505 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11506
11507#define IEM_MC_ELSE() } else {
11508#define IEM_MC_ENDIF() } do {} while (0)
11509
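/* The IF/ELSE/ENDIF macros above open and close C blocks and must therefore
 * always be paired.  A minimal flag-dependent sketch (X86_EFL_ZF is the usual
 * EFLAGS.ZF mask; IEM_MC_REL_JMP_S8 / IEM_MC_ADVANCE_RIP are assumed from the
 * wider microcode vocabulary and i8Imm is a hypothetical decoded immediate):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     } IEM_MC_ELSE() {
 *         IEM_MC_ADVANCE_RIP();
 *     } IEM_MC_ENDIF();
 */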
11510/** @} */
11511
11512
11513/** @name Opcode Debug Helpers.
11514 * @{
11515 */
11516#ifdef VBOX_WITH_STATISTICS
11517# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11518#else
11519# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11520#endif
11521
11522#ifdef DEBUG
11523# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11524 do { \
11525 IEMOP_INC_STATS(a_Stats); \
11526 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11527 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11528 } while (0)
11529
11530# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11531 do { \
11532 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11533 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11534 (void)RT_CONCAT(OP_,a_Upper); \
11535 (void)(a_fDisHints); \
11536 (void)(a_fIemHints); \
11537 } while (0)
11538
11539# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11540 do { \
11541 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11542 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11543 (void)RT_CONCAT(OP_,a_Upper); \
11544 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11545 (void)(a_fDisHints); \
11546 (void)(a_fIemHints); \
11547 } while (0)
11548
11549# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11550 do { \
11551 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11552 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11553 (void)RT_CONCAT(OP_,a_Upper); \
11554 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11555 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11556 (void)(a_fDisHints); \
11557 (void)(a_fIemHints); \
11558 } while (0)
11559
11560# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11561 do { \
11562 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11563 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11564 (void)RT_CONCAT(OP_,a_Upper); \
11565 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11566 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11567 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11568 (void)(a_fDisHints); \
11569 (void)(a_fIemHints); \
11570 } while (0)
11571
11572# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11573 do { \
11574 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11575 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11576 (void)RT_CONCAT(OP_,a_Upper); \
11577 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11578 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11579 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11580 (void)RT_CONCAT(OP_PARM_,a_Op4); \
11581 (void)(a_fDisHints); \
11582 (void)(a_fIemHints); \
11583 } while (0)
11584
11585#else
11586# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11587
11588# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11589 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11590# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11591 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11592# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11593 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11594# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11595 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11596# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11597 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11598
11599#endif
11600
11601#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11602 IEMOP_MNEMONIC0EX(a_Lower, \
11603 #a_Lower, \
11604 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
11605#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11606 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
11607 #a_Lower " " #a_Op1, \
11608 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
11609#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11610 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
11611 #a_Lower " " #a_Op1 "," #a_Op2, \
11612 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
11613#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11614 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
11615 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
11616 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
11617#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11618 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
11619 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
11620 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
11621
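/* Illustrative use of the mnemonic wrappers above (the tokens are picked for
 * shape from the existing IEMOPFORM_/OP_/OP_PARM_ vocabularies, not quoted
 * from a specific instruction in this file):
 *
 *     IEMOP_MNEMONIC2(RM, XOR, xor, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * With VBOX_WITH_STATISTICS this bumps the per-form counter (here xor_Gv_Ev);
 * in DEBUG builds it additionally logs the mnemonic at Log4 level and
 * references IEMOPFORM_RM, OP_XOR, OP_PARM_Gv and OP_PARM_Ev so that
 * misspelled tokens fail the build.
 */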
11622/** @} */
11623
11624
11625/** @name Opcode Helpers.
11626 * @{
11627 */
11628
11629#ifdef IN_RING3
11630# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11631 do { \
11632 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11633 else \
11634 { \
11635 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11636 return IEMOP_RAISE_INVALID_OPCODE(); \
11637 } \
11638 } while (0)
11639#else
11640# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11641 do { \
11642 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11643 else return IEMOP_RAISE_INVALID_OPCODE(); \
11644 } while (0)
11645#endif
11646
11647/** The instruction requires a 186 or later. */
11648#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11649# define IEMOP_HLP_MIN_186() do { } while (0)
11650#else
11651# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11652#endif
11653
11654/** The instruction requires a 286 or later. */
11655#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11656# define IEMOP_HLP_MIN_286() do { } while (0)
11657#else
11658# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11659#endif
11660
11661/** The instruction requires a 386 or later. */
11662#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11663# define IEMOP_HLP_MIN_386() do { } while (0)
11664#else
11665# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11666#endif
11667
11668/** The instruction requires a 386 or later if the given expression is true. */
11669#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11670# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11671#else
11672# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11673#endif
11674
11675/** The instruction requires a 486 or later. */
11676#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11677# define IEMOP_HLP_MIN_486() do { } while (0)
11678#else
11679# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11680#endif
11681
11682/** The instruction requires a Pentium (586) or later. */
11683#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11684# define IEMOP_HLP_MIN_586() do { } while (0)
11685#else
11686# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11687#endif
11688
11689/** The instruction requires a PentiumPro (686) or later. */
11690#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11691# define IEMOP_HLP_MIN_686() do { } while (0)
11692#else
11693# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11694#endif
11695
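/* Usage sketch for the minimum-CPU helpers above: an opcode decoder invokes
 * the matching helper before doing any other work, e.g. for a 386+ only
 * instruction (FNIEMOP_DEF is the usual decoder signature macro; the body is
 * elided):
 *
 *     FNIEMOP_DEF(iemOp_hypothetical386Insn)
 *     {
 *         IEMOP_HLP_MIN_386();
 *         ...
 *     }
 *
 * When IEM_CFG_TARGET_CPU already guarantees the minimum the helper compiles
 * to nothing; otherwise it raises \#UD via IEMOP_RAISE_INVALID_OPCODE() on
 * older target CPUs (ring-3 builds additionally call DBGFSTOP first).
 */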
11696
11697/** The instruction raises an \#UD in real and V8086 mode. */
11698#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11699 do \
11700 { \
11701 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11702 return IEMOP_RAISE_INVALID_OPCODE(); \
11703 } while (0)
11704
11705#if 0
11706#ifdef VBOX_WITH_NESTED_HWVIRT
11707/** The instruction raises an \#UD when SVM is not enabled. */
11708#define IEMOP_HLP_NEEDS_SVM_ENABLED() \
11709 do \
11710 { \
11711        if (!IEM_IS_SVM_ENABLED(pVCpu)) \
11712 return IEMOP_RAISE_INVALID_OPCODE(); \
11713 } while (0)
11714#endif
11715#endif
11716
11717/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11718 * 64-bit mode. */
11719#define IEMOP_HLP_NO_64BIT() \
11720 do \
11721 { \
11722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11723 return IEMOP_RAISE_INVALID_OPCODE(); \
11724 } while (0)
11725
11726/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11727 * 64-bit mode. */
11728#define IEMOP_HLP_ONLY_64BIT() \
11729 do \
11730 { \
11731 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11732 return IEMOP_RAISE_INVALID_OPCODE(); \
11733 } while (0)
11734
11735/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11736#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11737 do \
11738 { \
11739 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11740 iemRecalEffOpSize64Default(pVCpu); \
11741 } while (0)
11742
11743/** The instruction has 64-bit operand size if 64-bit mode. */
11744#define IEMOP_HLP_64BIT_OP_SIZE() \
11745 do \
11746 { \
11747 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11748 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11749 } while (0)
11750
11751/** Only a REX prefix immediately preceding the first opcode byte takes
11752 * effect. This macro helps ensure this and logs bad guest code. */
11753#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11754 do \
11755 { \
11756 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11757 { \
11758 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11759 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11760 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11761 pVCpu->iem.s.uRexB = 0; \
11762 pVCpu->iem.s.uRexIndex = 0; \
11763 pVCpu->iem.s.uRexReg = 0; \
11764 iemRecalEffOpSize(pVCpu); \
11765 } \
11766 } while (0)
11767
11768/**
11769 * Done decoding.
11770 */
11771#define IEMOP_HLP_DONE_DECODING() \
11772 do \
11773 { \
11774 /*nothing for now, maybe later... */ \
11775 } while (0)
11776
11777/**
11778 * Done decoding, raise \#UD exception if lock prefix present.
11779 */
11780#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11781 do \
11782 { \
11783 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11784 { /* likely */ } \
11785 else \
11786 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11787 } while (0)
11788#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11789 do \
11790 { \
11791 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11792 { /* likely */ } \
11793 else \
11794 { \
11795 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11796 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11797 } \
11798 } while (0)
11799#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11800 do \
11801 { \
11802 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11803 { /* likely */ } \
11804 else \
11805 { \
11806 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11807 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11808 } \
11809 } while (0)
11810
11811/**
11812 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11813 * are present.
11814 */
11815#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11816 do \
11817 { \
11818 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11819 { /* likely */ } \
11820 else \
11821 return IEMOP_RAISE_INVALID_OPCODE(); \
11822 } while (0)
11823
11824
11825/**
11826 * Calculates the effective address of a ModR/M memory operand.
11827 *
11828 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11829 *
11830 * @return Strict VBox status code.
11831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11832 * @param bRm The ModRM byte.
11833 * @param cbImm The size of any immediate following the
11834 * effective address opcode bytes. Important for
11835 * RIP relative addressing.
11836 * @param pGCPtrEff Where to return the effective address.
11837 */
11838IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11839{
11840 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11841 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11842# define SET_SS_DEF() \
11843 do \
11844 { \
11845 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11846 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11847 } while (0)
11848
11849 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11850 {
11851/** @todo Check the effective address size crap! */
11852 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11853 {
11854 uint16_t u16EffAddr;
11855
11856 /* Handle the disp16 form with no registers first. */
11857 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11858 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11859 else
11860 {
11861                /* Get the displacement. */
11862 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11863 {
11864 case 0: u16EffAddr = 0; break;
11865 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11866 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11867 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11868 }
11869
11870 /* Add the base and index registers to the disp. */
11871 switch (bRm & X86_MODRM_RM_MASK)
11872 {
11873 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11874 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11875 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11876 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11877 case 4: u16EffAddr += pCtx->si; break;
11878 case 5: u16EffAddr += pCtx->di; break;
11879 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11880 case 7: u16EffAddr += pCtx->bx; break;
11881 }
11882 }
11883
11884 *pGCPtrEff = u16EffAddr;
11885 }
11886 else
11887 {
11888 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11889 uint32_t u32EffAddr;
11890
11891 /* Handle the disp32 form with no registers first. */
11892 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11893 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11894 else
11895 {
11896 /* Get the register (or SIB) value. */
11897 switch ((bRm & X86_MODRM_RM_MASK))
11898 {
11899 case 0: u32EffAddr = pCtx->eax; break;
11900 case 1: u32EffAddr = pCtx->ecx; break;
11901 case 2: u32EffAddr = pCtx->edx; break;
11902 case 3: u32EffAddr = pCtx->ebx; break;
11903 case 4: /* SIB */
11904 {
11905 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11906
11907 /* Get the index and scale it. */
11908 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11909 {
11910 case 0: u32EffAddr = pCtx->eax; break;
11911 case 1: u32EffAddr = pCtx->ecx; break;
11912 case 2: u32EffAddr = pCtx->edx; break;
11913 case 3: u32EffAddr = pCtx->ebx; break;
11914 case 4: u32EffAddr = 0; /*none */ break;
11915 case 5: u32EffAddr = pCtx->ebp; break;
11916 case 6: u32EffAddr = pCtx->esi; break;
11917 case 7: u32EffAddr = pCtx->edi; break;
11918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11919 }
11920 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11921
11922 /* add base */
11923 switch (bSib & X86_SIB_BASE_MASK)
11924 {
11925 case 0: u32EffAddr += pCtx->eax; break;
11926 case 1: u32EffAddr += pCtx->ecx; break;
11927 case 2: u32EffAddr += pCtx->edx; break;
11928 case 3: u32EffAddr += pCtx->ebx; break;
11929 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11930 case 5:
11931 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11932 {
11933 u32EffAddr += pCtx->ebp;
11934 SET_SS_DEF();
11935 }
11936 else
11937 {
11938 uint32_t u32Disp;
11939 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11940 u32EffAddr += u32Disp;
11941 }
11942 break;
11943 case 6: u32EffAddr += pCtx->esi; break;
11944 case 7: u32EffAddr += pCtx->edi; break;
11945 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11946 }
11947 break;
11948 }
11949 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11950 case 6: u32EffAddr = pCtx->esi; break;
11951 case 7: u32EffAddr = pCtx->edi; break;
11952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11953 }
11954
11955 /* Get and add the displacement. */
11956 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11957 {
11958 case 0:
11959 break;
11960 case 1:
11961 {
11962 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11963 u32EffAddr += i8Disp;
11964 break;
11965 }
11966 case 2:
11967 {
11968 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11969 u32EffAddr += u32Disp;
11970 break;
11971 }
11972 default:
11973 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11974 }
11975
11976 }
11977 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11978 *pGCPtrEff = u32EffAddr;
11979 else
11980 {
11981 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11982 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11983 }
11984 }
11985 }
11986 else
11987 {
11988 uint64_t u64EffAddr;
11989
11990 /* Handle the rip+disp32 form with no registers first. */
11991 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11992 {
11993 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11994 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
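            /* Illustrative: RIP-relative operands are relative to the address of
               the *next* instruction.  At this point only the bytes up to and
               including the disp32 have been fetched, so IEM_GET_INSTR_LEN()
               does not yet cover a trailing immediate; cbImm accounts for it.
               E.g. with a 4-byte immediate and disp32=0x10 the target is
               (RIP of the next instruction) + 0x10 only because those four
               immediate bytes are added here via cbImm. */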
11995 }
11996 else
11997 {
11998 /* Get the register (or SIB) value. */
11999 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12000 {
12001 case 0: u64EffAddr = pCtx->rax; break;
12002 case 1: u64EffAddr = pCtx->rcx; break;
12003 case 2: u64EffAddr = pCtx->rdx; break;
12004 case 3: u64EffAddr = pCtx->rbx; break;
12005 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12006 case 6: u64EffAddr = pCtx->rsi; break;
12007 case 7: u64EffAddr = pCtx->rdi; break;
12008 case 8: u64EffAddr = pCtx->r8; break;
12009 case 9: u64EffAddr = pCtx->r9; break;
12010 case 10: u64EffAddr = pCtx->r10; break;
12011 case 11: u64EffAddr = pCtx->r11; break;
12012 case 13: u64EffAddr = pCtx->r13; break;
12013 case 14: u64EffAddr = pCtx->r14; break;
12014 case 15: u64EffAddr = pCtx->r15; break;
12015 /* SIB */
12016 case 4:
12017 case 12:
12018 {
12019 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12020
12021 /* Get the index and scale it. */
12022 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12023 {
12024 case 0: u64EffAddr = pCtx->rax; break;
12025 case 1: u64EffAddr = pCtx->rcx; break;
12026 case 2: u64EffAddr = pCtx->rdx; break;
12027 case 3: u64EffAddr = pCtx->rbx; break;
12028 case 4: u64EffAddr = 0; /*none */ break;
12029 case 5: u64EffAddr = pCtx->rbp; break;
12030 case 6: u64EffAddr = pCtx->rsi; break;
12031 case 7: u64EffAddr = pCtx->rdi; break;
12032 case 8: u64EffAddr = pCtx->r8; break;
12033 case 9: u64EffAddr = pCtx->r9; break;
12034 case 10: u64EffAddr = pCtx->r10; break;
12035 case 11: u64EffAddr = pCtx->r11; break;
12036 case 12: u64EffAddr = pCtx->r12; break;
12037 case 13: u64EffAddr = pCtx->r13; break;
12038 case 14: u64EffAddr = pCtx->r14; break;
12039 case 15: u64EffAddr = pCtx->r15; break;
12040 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12041 }
12042 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12043
12044 /* add base */
12045 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12046 {
12047 case 0: u64EffAddr += pCtx->rax; break;
12048 case 1: u64EffAddr += pCtx->rcx; break;
12049 case 2: u64EffAddr += pCtx->rdx; break;
12050 case 3: u64EffAddr += pCtx->rbx; break;
12051 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12052 case 6: u64EffAddr += pCtx->rsi; break;
12053 case 7: u64EffAddr += pCtx->rdi; break;
12054 case 8: u64EffAddr += pCtx->r8; break;
12055 case 9: u64EffAddr += pCtx->r9; break;
12056 case 10: u64EffAddr += pCtx->r10; break;
12057 case 11: u64EffAddr += pCtx->r11; break;
12058 case 12: u64EffAddr += pCtx->r12; break;
12059 case 14: u64EffAddr += pCtx->r14; break;
12060 case 15: u64EffAddr += pCtx->r15; break;
12061 /* complicated encodings */
12062 case 5:
12063 case 13:
12064 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12065 {
12066 if (!pVCpu->iem.s.uRexB)
12067 {
12068 u64EffAddr += pCtx->rbp;
12069 SET_SS_DEF();
12070 }
12071 else
12072 u64EffAddr += pCtx->r13;
12073 }
12074 else
12075 {
12076 uint32_t u32Disp;
12077 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12078 u64EffAddr += (int32_t)u32Disp;
12079 }
12080 break;
12081 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12082 }
12083 break;
12084 }
12085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12086 }
12087
12088 /* Get and add the displacement. */
12089 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12090 {
12091 case 0:
12092 break;
12093 case 1:
12094 {
12095 int8_t i8Disp;
12096 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12097 u64EffAddr += i8Disp;
12098 break;
12099 }
12100 case 2:
12101 {
12102 uint32_t u32Disp;
12103 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12104 u64EffAddr += (int32_t)u32Disp;
12105 break;
12106 }
12107 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12108 }
12109
12110 }
12111
12112 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12113 *pGCPtrEff = u64EffAddr;
12114 else
12115 {
12116 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12117 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12118 }
12119 }
12120
12121 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12122 return VINF_SUCCESS;
12123}
12124
12125
12126/**
12127 * Calculates the effective address of a ModR/M memory operand.
12128 *
12129 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12130 *
12131 * @return Strict VBox status code.
12132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12133 * @param bRm The ModRM byte.
12134 * @param cbImm The size of any immediate following the
12135 * effective address opcode bytes. Important for
12136 * RIP relative addressing.
12137 * @param pGCPtrEff Where to return the effective address.
12138 * @param offRsp RSP displacement.
12139 */
12140IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12141{
12142    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12143 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
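    /* Default the effective segment to SS when a BP/SP based addressing form is used
       and no segment override prefix is in effect. */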
12144# define SET_SS_DEF() \
12145 do \
12146 { \
12147 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12148 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12149 } while (0)
12150
12151 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12152 {
12153/** @todo Check the effective address size crap! */
12154 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12155 {
12156 uint16_t u16EffAddr;
12157
12158 /* Handle the disp16 form with no registers first. */
12159 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12160 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12161 else
12162 {
12163                /* Get the displacement. */
12164 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12165 {
12166 case 0: u16EffAddr = 0; break;
12167 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12168 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12169 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12170 }
12171
12172 /* Add the base and index registers to the disp. */
12173 switch (bRm & X86_MODRM_RM_MASK)
12174 {
12175 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12176 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12177 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12178 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12179 case 4: u16EffAddr += pCtx->si; break;
12180 case 5: u16EffAddr += pCtx->di; break;
12181 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12182 case 7: u16EffAddr += pCtx->bx; break;
12183 }
12184 }
12185
12186 *pGCPtrEff = u16EffAddr;
12187 }
12188 else
12189 {
12190 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12191 uint32_t u32EffAddr;
12192
12193 /* Handle the disp32 form with no registers first. */
12194 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12195 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12196 else
12197 {
12198 /* Get the register (or SIB) value. */
12199 switch ((bRm & X86_MODRM_RM_MASK))
12200 {
12201 case 0: u32EffAddr = pCtx->eax; break;
12202 case 1: u32EffAddr = pCtx->ecx; break;
12203 case 2: u32EffAddr = pCtx->edx; break;
12204 case 3: u32EffAddr = pCtx->ebx; break;
12205 case 4: /* SIB */
12206 {
12207 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12208
12209 /* Get the index and scale it. */
12210 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12211 {
12212 case 0: u32EffAddr = pCtx->eax; break;
12213 case 1: u32EffAddr = pCtx->ecx; break;
12214 case 2: u32EffAddr = pCtx->edx; break;
12215 case 3: u32EffAddr = pCtx->ebx; break;
12216 case 4: u32EffAddr = 0; /*none */ break;
12217 case 5: u32EffAddr = pCtx->ebp; break;
12218 case 6: u32EffAddr = pCtx->esi; break;
12219 case 7: u32EffAddr = pCtx->edi; break;
12220 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12221 }
12222 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12223
12224 /* add base */
12225 switch (bSib & X86_SIB_BASE_MASK)
12226 {
12227 case 0: u32EffAddr += pCtx->eax; break;
12228 case 1: u32EffAddr += pCtx->ecx; break;
12229 case 2: u32EffAddr += pCtx->edx; break;
12230 case 3: u32EffAddr += pCtx->ebx; break;
12231 case 4:
12232 u32EffAddr += pCtx->esp + offRsp;
12233 SET_SS_DEF();
12234 break;
12235 case 5:
12236 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12237 {
12238 u32EffAddr += pCtx->ebp;
12239 SET_SS_DEF();
12240 }
12241 else
12242 {
12243 uint32_t u32Disp;
12244 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12245 u32EffAddr += u32Disp;
12246 }
12247 break;
12248 case 6: u32EffAddr += pCtx->esi; break;
12249 case 7: u32EffAddr += pCtx->edi; break;
12250 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12251 }
12252 break;
12253 }
12254 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12255 case 6: u32EffAddr = pCtx->esi; break;
12256 case 7: u32EffAddr = pCtx->edi; break;
12257 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12258 }
12259
12260 /* Get and add the displacement. */
12261 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12262 {
12263 case 0:
12264 break;
12265 case 1:
12266 {
12267 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12268 u32EffAddr += i8Disp;
12269 break;
12270 }
12271 case 2:
12272 {
12273 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12274 u32EffAddr += u32Disp;
12275 break;
12276 }
12277 default:
12278 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12279 }
12280
12281 }
12282 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12283 *pGCPtrEff = u32EffAddr;
12284 else
12285 {
12286 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12287 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12288 }
12289 }
12290 }
12291 else
12292 {
12293 uint64_t u64EffAddr;
12294
12295 /* Handle the rip+disp32 form with no registers first. */
12296 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12297 {
12298 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12299 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12300 }
12301 else
12302 {
12303 /* Get the register (or SIB) value. */
12304 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12305 {
12306 case 0: u64EffAddr = pCtx->rax; break;
12307 case 1: u64EffAddr = pCtx->rcx; break;
12308 case 2: u64EffAddr = pCtx->rdx; break;
12309 case 3: u64EffAddr = pCtx->rbx; break;
12310 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12311 case 6: u64EffAddr = pCtx->rsi; break;
12312 case 7: u64EffAddr = pCtx->rdi; break;
12313 case 8: u64EffAddr = pCtx->r8; break;
12314 case 9: u64EffAddr = pCtx->r9; break;
12315 case 10: u64EffAddr = pCtx->r10; break;
12316 case 11: u64EffAddr = pCtx->r11; break;
12317 case 13: u64EffAddr = pCtx->r13; break;
12318 case 14: u64EffAddr = pCtx->r14; break;
12319 case 15: u64EffAddr = pCtx->r15; break;
12320 /* SIB */
12321 case 4:
12322 case 12:
12323 {
12324 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12325
12326 /* Get the index and scale it. */
12327 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12328 {
12329 case 0: u64EffAddr = pCtx->rax; break;
12330 case 1: u64EffAddr = pCtx->rcx; break;
12331 case 2: u64EffAddr = pCtx->rdx; break;
12332 case 3: u64EffAddr = pCtx->rbx; break;
12333 case 4: u64EffAddr = 0; /*none */ break;
12334 case 5: u64EffAddr = pCtx->rbp; break;
12335 case 6: u64EffAddr = pCtx->rsi; break;
12336 case 7: u64EffAddr = pCtx->rdi; break;
12337 case 8: u64EffAddr = pCtx->r8; break;
12338 case 9: u64EffAddr = pCtx->r9; break;
12339 case 10: u64EffAddr = pCtx->r10; break;
12340 case 11: u64EffAddr = pCtx->r11; break;
12341 case 12: u64EffAddr = pCtx->r12; break;
12342 case 13: u64EffAddr = pCtx->r13; break;
12343 case 14: u64EffAddr = pCtx->r14; break;
12344 case 15: u64EffAddr = pCtx->r15; break;
12345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12346 }
12347 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12348
12349 /* add base */
12350 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12351 {
12352 case 0: u64EffAddr += pCtx->rax; break;
12353 case 1: u64EffAddr += pCtx->rcx; break;
12354 case 2: u64EffAddr += pCtx->rdx; break;
12355 case 3: u64EffAddr += pCtx->rbx; break;
12356 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12357 case 6: u64EffAddr += pCtx->rsi; break;
12358 case 7: u64EffAddr += pCtx->rdi; break;
12359 case 8: u64EffAddr += pCtx->r8; break;
12360 case 9: u64EffAddr += pCtx->r9; break;
12361 case 10: u64EffAddr += pCtx->r10; break;
12362 case 11: u64EffAddr += pCtx->r11; break;
12363 case 12: u64EffAddr += pCtx->r12; break;
12364 case 14: u64EffAddr += pCtx->r14; break;
12365 case 15: u64EffAddr += pCtx->r15; break;
12366 /* complicated encodings */
12367 case 5:
12368 case 13:
12369 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12370 {
12371 if (!pVCpu->iem.s.uRexB)
12372 {
12373 u64EffAddr += pCtx->rbp;
12374 SET_SS_DEF();
12375 }
12376 else
12377 u64EffAddr += pCtx->r13;
12378 }
12379 else
12380 {
12381 uint32_t u32Disp;
12382 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12383 u64EffAddr += (int32_t)u32Disp;
12384 }
12385 break;
12386 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12387 }
12388 break;
12389 }
12390 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12391 }
12392
12393 /* Get and add the displacement. */
12394 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12395 {
12396 case 0:
12397 break;
12398 case 1:
12399 {
12400 int8_t i8Disp;
12401 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12402 u64EffAddr += i8Disp;
12403 break;
12404 }
12405 case 2:
12406 {
12407 uint32_t u32Disp;
12408 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12409 u64EffAddr += (int32_t)u32Disp;
12410 break;
12411 }
12412 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12413 }
12414
12415 }
12416
12417 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12418 *pGCPtrEff = u64EffAddr;
12419 else
12420 {
12421 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12422 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12423 }
12424 }
12425
12426    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12427 return VINF_SUCCESS;
12428}
12429
12430
12431#ifdef IEM_WITH_SETJMP
12432/**
12433 * Calculates the effective address of a ModR/M memory operand.
12434 *
12435 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12436 *
12437 * May longjmp on internal error.
12438 *
12439 * @return The effective address.
12440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12441 * @param bRm The ModRM byte.
12442 * @param cbImm The size of any immediate following the
12443 * effective address opcode bytes. Important for
12444 * RIP relative addressing.
12445 */
12446IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12447{
12448 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12449 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
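    /* Default the effective segment to SS when a BP/SP based addressing form is used
       and no segment override prefix is in effect. */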
12450# define SET_SS_DEF() \
12451 do \
12452 { \
12453 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12454 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12455 } while (0)
12456
12457 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12458 {
12459/** @todo Check the effective address size crap! */
12460 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12461 {
12462 uint16_t u16EffAddr;
12463
12464 /* Handle the disp16 form with no registers first. */
12465 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12466 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12467 else
12468 {
12469                /* Get the displacement. */
12470 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12471 {
12472 case 0: u16EffAddr = 0; break;
12473 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12474 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12475 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12476 }
12477
12478 /* Add the base and index registers to the disp. */
12479 switch (bRm & X86_MODRM_RM_MASK)
12480 {
12481 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12482 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12483 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12484 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12485 case 4: u16EffAddr += pCtx->si; break;
12486 case 5: u16EffAddr += pCtx->di; break;
12487 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12488 case 7: u16EffAddr += pCtx->bx; break;
12489 }
12490 }
12491
12492 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12493 return u16EffAddr;
12494 }
12495
12496 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12497 uint32_t u32EffAddr;
12498
12499 /* Handle the disp32 form with no registers first. */
12500 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12501 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12502 else
12503 {
12504 /* Get the register (or SIB) value. */
12505 switch ((bRm & X86_MODRM_RM_MASK))
12506 {
12507 case 0: u32EffAddr = pCtx->eax; break;
12508 case 1: u32EffAddr = pCtx->ecx; break;
12509 case 2: u32EffAddr = pCtx->edx; break;
12510 case 3: u32EffAddr = pCtx->ebx; break;
12511 case 4: /* SIB */
12512 {
12513 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12514
12515 /* Get the index and scale it. */
12516 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12517 {
12518 case 0: u32EffAddr = pCtx->eax; break;
12519 case 1: u32EffAddr = pCtx->ecx; break;
12520 case 2: u32EffAddr = pCtx->edx; break;
12521 case 3: u32EffAddr = pCtx->ebx; break;
12522 case 4: u32EffAddr = 0; /*none */ break;
12523 case 5: u32EffAddr = pCtx->ebp; break;
12524 case 6: u32EffAddr = pCtx->esi; break;
12525 case 7: u32EffAddr = pCtx->edi; break;
12526 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12527 }
12528 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12529
12530 /* add base */
12531 switch (bSib & X86_SIB_BASE_MASK)
12532 {
12533 case 0: u32EffAddr += pCtx->eax; break;
12534 case 1: u32EffAddr += pCtx->ecx; break;
12535 case 2: u32EffAddr += pCtx->edx; break;
12536 case 3: u32EffAddr += pCtx->ebx; break;
12537 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12538 case 5:
12539 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12540 {
12541 u32EffAddr += pCtx->ebp;
12542 SET_SS_DEF();
12543 }
12544 else
12545 {
12546 uint32_t u32Disp;
12547 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12548 u32EffAddr += u32Disp;
12549 }
12550 break;
12551 case 6: u32EffAddr += pCtx->esi; break;
12552 case 7: u32EffAddr += pCtx->edi; break;
12553 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12554 }
12555 break;
12556 }
12557 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12558 case 6: u32EffAddr = pCtx->esi; break;
12559 case 7: u32EffAddr = pCtx->edi; break;
12560 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12561 }
12562
12563 /* Get and add the displacement. */
12564 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12565 {
12566 case 0:
12567 break;
12568 case 1:
12569 {
12570 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12571 u32EffAddr += i8Disp;
12572 break;
12573 }
12574 case 2:
12575 {
12576 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12577 u32EffAddr += u32Disp;
12578 break;
12579 }
12580 default:
12581 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12582 }
12583 }
12584
12585 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12586 {
12587 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12588 return u32EffAddr;
12589 }
12590 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12591 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12592 return u32EffAddr & UINT16_MAX;
12593 }
12594
12595 uint64_t u64EffAddr;
12596
12597 /* Handle the rip+disp32 form with no registers first. */
12598 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12599 {
12600 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12601 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12602 }
12603 else
12604 {
12605 /* Get the register (or SIB) value. */
12606 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12607 {
12608 case 0: u64EffAddr = pCtx->rax; break;
12609 case 1: u64EffAddr = pCtx->rcx; break;
12610 case 2: u64EffAddr = pCtx->rdx; break;
12611 case 3: u64EffAddr = pCtx->rbx; break;
12612 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12613 case 6: u64EffAddr = pCtx->rsi; break;
12614 case 7: u64EffAddr = pCtx->rdi; break;
12615 case 8: u64EffAddr = pCtx->r8; break;
12616 case 9: u64EffAddr = pCtx->r9; break;
12617 case 10: u64EffAddr = pCtx->r10; break;
12618 case 11: u64EffAddr = pCtx->r11; break;
12619 case 13: u64EffAddr = pCtx->r13; break;
12620 case 14: u64EffAddr = pCtx->r14; break;
12621 case 15: u64EffAddr = pCtx->r15; break;
12622 /* SIB */
12623 case 4:
12624 case 12:
12625 {
12626 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12627
12628 /* Get the index and scale it. */
12629 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12630 {
12631 case 0: u64EffAddr = pCtx->rax; break;
12632 case 1: u64EffAddr = pCtx->rcx; break;
12633 case 2: u64EffAddr = pCtx->rdx; break;
12634 case 3: u64EffAddr = pCtx->rbx; break;
12635 case 4: u64EffAddr = 0; /*none */ break;
12636 case 5: u64EffAddr = pCtx->rbp; break;
12637 case 6: u64EffAddr = pCtx->rsi; break;
12638 case 7: u64EffAddr = pCtx->rdi; break;
12639 case 8: u64EffAddr = pCtx->r8; break;
12640 case 9: u64EffAddr = pCtx->r9; break;
12641 case 10: u64EffAddr = pCtx->r10; break;
12642 case 11: u64EffAddr = pCtx->r11; break;
12643 case 12: u64EffAddr = pCtx->r12; break;
12644 case 13: u64EffAddr = pCtx->r13; break;
12645 case 14: u64EffAddr = pCtx->r14; break;
12646 case 15: u64EffAddr = pCtx->r15; break;
12647 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12648 }
12649 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12650
12651 /* add base */
12652 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12653 {
12654 case 0: u64EffAddr += pCtx->rax; break;
12655 case 1: u64EffAddr += pCtx->rcx; break;
12656 case 2: u64EffAddr += pCtx->rdx; break;
12657 case 3: u64EffAddr += pCtx->rbx; break;
12658 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12659 case 6: u64EffAddr += pCtx->rsi; break;
12660 case 7: u64EffAddr += pCtx->rdi; break;
12661 case 8: u64EffAddr += pCtx->r8; break;
12662 case 9: u64EffAddr += pCtx->r9; break;
12663 case 10: u64EffAddr += pCtx->r10; break;
12664 case 11: u64EffAddr += pCtx->r11; break;
12665 case 12: u64EffAddr += pCtx->r12; break;
12666 case 14: u64EffAddr += pCtx->r14; break;
12667 case 15: u64EffAddr += pCtx->r15; break;
12668 /* complicated encodings */
12669 case 5:
12670 case 13:
12671 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12672 {
12673 if (!pVCpu->iem.s.uRexB)
12674 {
12675 u64EffAddr += pCtx->rbp;
12676 SET_SS_DEF();
12677 }
12678 else
12679 u64EffAddr += pCtx->r13;
12680 }
12681 else
12682 {
12683 uint32_t u32Disp;
12684 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12685 u64EffAddr += (int32_t)u32Disp;
12686 }
12687 break;
12688 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12689 }
12690 break;
12691 }
12692 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12693 }
12694
12695 /* Get and add the displacement. */
12696 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12697 {
12698 case 0:
12699 break;
12700 case 1:
12701 {
12702 int8_t i8Disp;
12703 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12704 u64EffAddr += i8Disp;
12705 break;
12706 }
12707 case 2:
12708 {
12709 uint32_t u32Disp;
12710 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12711 u64EffAddr += (int32_t)u32Disp;
12712 break;
12713 }
12714 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12715 }
12716
12717 }
12718
12719 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12720 {
12721 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12722 return u64EffAddr;
12723 }
12724 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12725 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12726 return u64EffAddr & UINT32_MAX;
12727}
12728#endif /* IEM_WITH_SETJMP */
12729
12730
12731/** @} */
12732
12733
12734
12735/*
12736 * Include the instructions
12737 */
12738#include "IEMAllInstructions.cpp.h"
12739
12740
12741
12742
12743#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12744
12745/**
12746 * Sets up execution verification mode.
12747 */
12748IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12749{
12751 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12752
12753 /*
12754 * Always note down the address of the current instruction.
12755 */
12756 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12757 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12758
12759 /*
12760 * Enable verification and/or logging.
12761 */
12762    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12763 if ( fNewNoRem
12764 && ( 0
12765#if 0 /* auto enable on first paged protected mode interrupt */
12766 || ( pOrgCtx->eflags.Bits.u1IF
12767 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12768 && TRPMHasTrap(pVCpu)
12769 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12770#endif
12771#if 0
12772        || (   pOrgCtx->cs.Sel == 0x10
12773            && (   pOrgCtx->rip == 0x90119e3e
12774                || pOrgCtx->rip == 0x901d9810))
12775#endif
12776#if 0 /* Auto enable DSL - FPU stuff. */
12777        || (   pOrgCtx->cs.Sel == 0x10
12778 && (// pOrgCtx->rip == 0xc02ec07f
12779 //|| pOrgCtx->rip == 0xc02ec082
12780 //|| pOrgCtx->rip == 0xc02ec0c9
12781 0
12782 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12783#endif
12784#if 0 /* Auto enable DSL - fstp st0 stuff. */
12785        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12786#endif
12787#if 0
12788 || pOrgCtx->rip == 0x9022bb3a
12789#endif
12790#if 0
12791 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12792#endif
12793#if 0
12794 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12795 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12796#endif
12797#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12799 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12800 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12801#endif
12802#if 0 /* NT4SP1 - xadd early boot. */
12803 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12804#endif
12805#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12806 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12807#endif
12808#if 0 /* NT4SP1 - cmpxchg (AMD). */
12809 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12810#endif
12811#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12812 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12813#endif
12814#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12815 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12816
12817#endif
12818#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12819 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12820
12821#endif
12822#if 0 /* NT4SP1 - frstor [ecx] */
12823 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12824#endif
12825#if 0 /* xxxxxx - All long mode code. */
12826 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12827#endif
12828#if 0 /* rep movsq linux 3.7 64-bit boot. */
12829 || (pOrgCtx->rip == 0x0000000000100241)
12830#endif
12831#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12832 || (pOrgCtx->rip == 0x000000000215e240)
12833#endif
12834#if 0 /* DOS's size-overridden iret to v8086. */
12835 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12836#endif
12837 )
12838 )
12839 {
12840 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12841 RTLogFlags(NULL, "enabled");
12842 fNewNoRem = false;
12843 }
12844 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12845 {
12846 pVCpu->iem.s.fNoRem = fNewNoRem;
12847 if (!fNewNoRem)
12848 {
12849 LogAlways(("Enabling verification mode!\n"));
12850 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12851 }
12852 else
12853 LogAlways(("Disabling verification mode!\n"));
12854 }
12855
12856 /*
12857     * Switch to a shadow copy of the guest context so the result can be compared afterwards.
12858 */
12859 if (IEM_VERIFICATION_ENABLED(pVCpu))
12860 {
12861 static CPUMCTX s_DebugCtx; /* Ugly! */
12862
12863 s_DebugCtx = *pOrgCtx;
12864 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12865 }
12866
12867 /*
12868 * See if there is an interrupt pending in TRPM and inject it if we can.
12869 */
12870 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12871 if ( pOrgCtx->eflags.Bits.u1IF
12872 && TRPMHasTrap(pVCpu)
12873 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12874 {
12875 uint8_t u8TrapNo;
12876 TRPMEVENT enmType;
12877 RTGCUINT uErrCode;
12878 RTGCPTR uCr2;
12879 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12880 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12881 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12882 TRPMResetTrap(pVCpu);
12883 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12884 }
12885
12886 /*
12887 * Reset the counters.
12888 */
12889 pVCpu->iem.s.cIOReads = 0;
12890 pVCpu->iem.s.cIOWrites = 0;
12891 pVCpu->iem.s.fIgnoreRaxRdx = false;
12892 pVCpu->iem.s.fOverlappingMovs = false;
12893 pVCpu->iem.s.fProblematicMemory = false;
12894 pVCpu->iem.s.fUndefinedEFlags = 0;
12895
12896 if (IEM_VERIFICATION_ENABLED(pVCpu))
12897 {
12898 /*
12899 * Free all verification records.
12900 */
12901 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12902 pVCpu->iem.s.pIemEvtRecHead = NULL;
12903 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12904 do
12905 {
12906 while (pEvtRec)
12907 {
12908 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12909 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12910 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12911 pEvtRec = pNext;
12912 }
12913 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12914 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12915 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12916 } while (pEvtRec);
12917 }
12918}
12919
12920
12921/**
12922 * Allocate an event record.
12923 * @returns Pointer to a record, or NULL if verification is disabled or allocation fails.
12924 */
12925IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12926{
12927 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12928 return NULL;
12929
12930 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12931 if (pEvtRec)
12932 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12933 else
12934 {
12935 if (!pVCpu->iem.s.ppIemEvtRecNext)
12936 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12937
12938 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12939 if (!pEvtRec)
12940 return NULL;
12941 }
12942 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12943 pEvtRec->pNext = NULL;
12944 return pEvtRec;
12945}
12946
12947
12948/**
12949 * IOMMMIORead notification.
12950 */
12951VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12952{
12953 PVMCPU pVCpu = VMMGetCpu(pVM);
12954 if (!pVCpu)
12955 return;
12956 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12957 if (!pEvtRec)
12958 return;
12959 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12960 pEvtRec->u.RamRead.GCPhys = GCPhys;
12961 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12962 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12963 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12964}
12965
12966
12967/**
12968 * IOMMMIOWrite notification.
12969 */
12970VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12971{
12972 PVMCPU pVCpu = VMMGetCpu(pVM);
12973 if (!pVCpu)
12974 return;
12975 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12976 if (!pEvtRec)
12977 return;
12978 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12979 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12980 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12981 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12982 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12983 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12984 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12985 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12986 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12987}
12988
12989
12990/**
12991 * IOMIOPortRead notification.
12992 */
12993VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12994{
12995 PVMCPU pVCpu = VMMGetCpu(pVM);
12996 if (!pVCpu)
12997 return;
12998 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12999 if (!pEvtRec)
13000 return;
13001 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13002 pEvtRec->u.IOPortRead.Port = Port;
13003 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13004 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13005 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13006}
13007
13008/**
13009 * IOMIOPortWrite notification.
13010 */
13011VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13012{
13013 PVMCPU pVCpu = VMMGetCpu(pVM);
13014 if (!pVCpu)
13015 return;
13016 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13017 if (!pEvtRec)
13018 return;
13019 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13020 pEvtRec->u.IOPortWrite.Port = Port;
13021 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13022 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13023 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13024 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13025}
13026
13027
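/**
 * IOMIOPortReadString notification.
 */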
13028VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13029{
13030 PVMCPU pVCpu = VMMGetCpu(pVM);
13031 if (!pVCpu)
13032 return;
13033 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13034 if (!pEvtRec)
13035 return;
13036 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13037 pEvtRec->u.IOPortStrRead.Port = Port;
13038 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13039 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13040 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13041 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13042}
13043
13044
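/**
 * IOMIOPortWriteString notification.
 */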
13045VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13046{
13047 PVMCPU pVCpu = VMMGetCpu(pVM);
13048 if (!pVCpu)
13049 return;
13050 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13051 if (!pEvtRec)
13052 return;
13053 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13054 pEvtRec->u.IOPortStrWrite.Port = Port;
13055 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13056 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13057 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13058 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13059}
13060
13061
13062/**
13063 * Fakes and records an I/O port read.
13064 *
13065 * @returns VINF_SUCCESS.
13066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13067 * @param Port The I/O port.
13068 * @param pu32Value Where to store the fake value.
13069 * @param cbValue The size of the access.
13070 */
13071IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13072{
13073 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13074 if (pEvtRec)
13075 {
13076 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13077 pEvtRec->u.IOPortRead.Port = Port;
13078 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13079 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13080 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13081 }
13082 pVCpu->iem.s.cIOReads++;
13083 *pu32Value = 0xcccccccc;
13084 return VINF_SUCCESS;
13085}
13086
13087
13088/**
13089 * Fakes and records an I/O port write.
13090 *
13091 * @returns VINF_SUCCESS.
13092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13093 * @param Port The I/O port.
13094 * @param u32Value The value being written.
13095 * @param cbValue The size of the access.
13096 */
13097IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13098{
13099 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13100 if (pEvtRec)
13101 {
13102 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13103 pEvtRec->u.IOPortWrite.Port = Port;
13104 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13105 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13106 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13107 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13108 }
13109 pVCpu->iem.s.cIOWrites++;
13110 return VINF_SUCCESS;
13111}
13112
13113
13114/**
13115 * Used to add extra details about a stub case.
13116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13117 */
13118IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13119{
13120 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13121 PVM pVM = pVCpu->CTX_SUFF(pVM);
13123 char szRegs[4096];
13124 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13125 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13126 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13127 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13128 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13129 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13130 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13131 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13132 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13133 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13134 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13135 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13136 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13137 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13138 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13139 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13140 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13141 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13142 " efer=%016VR{efer}\n"
13143 " pat=%016VR{pat}\n"
13144 " sf_mask=%016VR{sf_mask}\n"
13145 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13146 " lstar=%016VR{lstar}\n"
13147 " star=%016VR{star} cstar=%016VR{cstar}\n"
13148 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13149 );
13150
13151 char szInstr1[256];
13152 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13153 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13154 szInstr1, sizeof(szInstr1), NULL);
13155 char szInstr2[256];
13156 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13157 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13158 szInstr2, sizeof(szInstr2), NULL);
13159
13160 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13161}
13162
13163
13164/**
13165 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13166 * dump to the assertion info.
13167 *
13168 * @param pEvtRec The record to dump.
13169 */
13170IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13171{
13172 switch (pEvtRec->enmEvent)
13173 {
13174 case IEMVERIFYEVENT_IOPORT_READ:
13175 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13176                            pEvtRec->u.IOPortRead.Port,
13177                            pEvtRec->u.IOPortRead.cbValue);
13178 break;
13179 case IEMVERIFYEVENT_IOPORT_WRITE:
13180 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13181 pEvtRec->u.IOPortWrite.Port,
13182 pEvtRec->u.IOPortWrite.cbValue,
13183 pEvtRec->u.IOPortWrite.u32Value);
13184 break;
13185 case IEMVERIFYEVENT_IOPORT_STR_READ:
13186 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13187                            pEvtRec->u.IOPortStrRead.Port,
13188                            pEvtRec->u.IOPortStrRead.cbValue,
13189                            pEvtRec->u.IOPortStrRead.cTransfers);
13190 break;
13191 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13192 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13193 pEvtRec->u.IOPortStrWrite.Port,
13194 pEvtRec->u.IOPortStrWrite.cbValue,
13195 pEvtRec->u.IOPortStrWrite.cTransfers);
13196 break;
13197 case IEMVERIFYEVENT_RAM_READ:
13198 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13199 pEvtRec->u.RamRead.GCPhys,
13200 pEvtRec->u.RamRead.cb);
13201 break;
13202 case IEMVERIFYEVENT_RAM_WRITE:
13203 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13204 pEvtRec->u.RamWrite.GCPhys,
13205 pEvtRec->u.RamWrite.cb,
13206 (int)pEvtRec->u.RamWrite.cb,
13207 pEvtRec->u.RamWrite.ab);
13208 break;
13209 default:
13210 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13211 break;
13212 }
13213}
13214
13215
13216/**
13217 * Raises an assertion on the specified records, showing the given message with
13218 * dumps of both records attached.
13219 *
13220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13221 * @param pEvtRec1 The first record.
13222 * @param pEvtRec2 The second record.
13223 * @param pszMsg The message explaining why we're asserting.
13224 */
13225IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13226{
13227 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13228 iemVerifyAssertAddRecordDump(pEvtRec1);
13229 iemVerifyAssertAddRecordDump(pEvtRec2);
13230 iemVerifyAssertMsg2(pVCpu);
13231 RTAssertPanic();
13232}
13233
13234
13235/**
13236 * Raises an assertion on the specified record, showing the given message with
13237 * a record dump attached.
13238 *
13239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13240 * @param pEvtRec1 The first record.
13241 * @param pszMsg The message explaining why we're asserting.
13242 */
13243IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13244{
13245 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13246 iemVerifyAssertAddRecordDump(pEvtRec);
13247 iemVerifyAssertMsg2(pVCpu);
13248 RTAssertPanic();
13249}
13250
13251
13252/**
13253 * Verifies a write record.
13254 *
13255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13256 * @param pEvtRec The write record.
13257 * @param   fRem        Set if REM did the other execution; if clear,
13258 *                      it was HM.
13259 */
13260IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13261{
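    /* Re-read the guest memory that IEM recorded as written and compare it byte for byte. */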
13262 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13263 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13264 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13265 if ( RT_FAILURE(rc)
13266 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13267 {
13268        /* Fend off INS: faked port reads return 0xcc bytes, so the memory IEM wrote won't match the real device data. */
13269 if ( !pVCpu->iem.s.cIOReads
13270 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13271 || ( pEvtRec->u.RamWrite.cb != 1
13272 && pEvtRec->u.RamWrite.cb != 2
13273 && pEvtRec->u.RamWrite.cb != 4) )
13274 {
13275 /* fend off ROMs and MMIO */
13276 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13277 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13278 {
13279 /* fend off fxsave */
13280 if (pEvtRec->u.RamWrite.cb != 512)
13281 {
13282 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13283 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13284                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
13285 RTAssertMsg2Add("%s: %.*Rhxs\n"
13286 "iem: %.*Rhxs\n",
13287 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13288 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13289 iemVerifyAssertAddRecordDump(pEvtRec);
13290 iemVerifyAssertMsg2(pVCpu);
13291 RTAssertPanic();
13292 }
13293 }
13294 }
13295 }
13296
13297}
13298
13299/**
13300 * Performs the post-execution verification checks.
13301 */
13302IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13303{
13304 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13305 return rcStrictIem;
13306
13307 /*
13308 * Switch back the state.
13309 */
13310 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13311 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13312 Assert(pOrgCtx != pDebugCtx);
13313 IEM_GET_CTX(pVCpu) = pOrgCtx;
13314
13315 /*
13316 * Execute the instruction in REM.
13317 */
13318 bool fRem = false;
13319 PVM pVM = pVCpu->CTX_SUFF(pVM);
13321 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13322#ifdef IEM_VERIFICATION_MODE_FULL_HM
13323 if ( HMIsEnabled(pVM)
13324 && pVCpu->iem.s.cIOReads == 0
13325 && pVCpu->iem.s.cIOWrites == 0
13326 && !pVCpu->iem.s.fProblematicMemory)
13327 {
13328 uint64_t uStartRip = pOrgCtx->rip;
13329 unsigned iLoops = 0;
13330 do
13331 {
13332 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13333 iLoops++;
13334 } while ( rc == VINF_SUCCESS
13335 || ( rc == VINF_EM_DBG_STEPPED
13336 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13337 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13338 || ( pOrgCtx->rip != pDebugCtx->rip
13339 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13340 && iLoops < 8) );
13341 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13342 rc = VINF_SUCCESS;
13343 }
13344#endif
13345 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13346 || rc == VINF_IOM_R3_IOPORT_READ
13347 || rc == VINF_IOM_R3_IOPORT_WRITE
13348 || rc == VINF_IOM_R3_MMIO_READ
13349 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13350 || rc == VINF_IOM_R3_MMIO_WRITE
13351 || rc == VINF_CPUM_R3_MSR_READ
13352 || rc == VINF_CPUM_R3_MSR_WRITE
13353 || rc == VINF_EM_RESCHEDULE
13354 )
13355 {
13356 EMRemLock(pVM);
13357 rc = REMR3EmulateInstruction(pVM, pVCpu);
13358 AssertRC(rc);
13359 EMRemUnlock(pVM);
13360 fRem = true;
13361 }
13362
13363# if 1 /* Skip unimplemented instructions for now. */
13364 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13365 {
13366 IEM_GET_CTX(pVCpu) = pOrgCtx;
13367 if (rc == VINF_EM_DBG_STEPPED)
13368 return VINF_SUCCESS;
13369 return rc;
13370 }
13371# endif
13372
13373 /*
13374 * Compare the register states.
13375 */
13376 unsigned cDiffs = 0;
13377 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13378 {
13379 //Log(("REM and IEM ends up with different registers!\n"));
13380 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13381
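        /* Helper macros: compare a single field between the IEM copy of the state
           (pDebugCtx / pDebugXState) and the other engine's copy (pOrgCtx / pOrgXState),
           logging and counting any difference. */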
13382# define CHECK_FIELD(a_Field) \
13383 do \
13384 { \
13385 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13386 { \
13387 switch (sizeof(pOrgCtx->a_Field)) \
13388 { \
13389 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13390 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13391 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13392 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13393 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13394 } \
13395 cDiffs++; \
13396 } \
13397 } while (0)
13398# define CHECK_XSTATE_FIELD(a_Field) \
13399 do \
13400 { \
13401 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13402 { \
13403 switch (sizeof(pOrgXState->a_Field)) \
13404 { \
13405 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13406 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13407 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13408 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13409 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13410 } \
13411 cDiffs++; \
13412 } \
13413 } while (0)
13414
13415# define CHECK_BIT_FIELD(a_Field) \
13416 do \
13417 { \
13418 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13419 { \
13420 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13421 cDiffs++; \
13422 } \
13423 } while (0)
13424
13425# define CHECK_SEL(a_Sel) \
13426 do \
13427 { \
13428 CHECK_FIELD(a_Sel.Sel); \
13429 CHECK_FIELD(a_Sel.Attr.u); \
13430 CHECK_FIELD(a_Sel.u64Base); \
13431 CHECK_FIELD(a_Sel.u32Limit); \
13432 CHECK_FIELD(a_Sel.fFlags); \
13433 } while (0)
13434
13435 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13436 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13437
13438#if 1 /* The recompiler doesn't update these the intel way. */
13439 if (fRem)
13440 {
13441 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13442 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13443 pOrgXState->x87.CS = pDebugXState->x87.CS;
13444 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13445 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13446 pOrgXState->x87.DS = pDebugXState->x87.DS;
13447 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13448 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13449 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13450 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13451 }
13452#endif
13453 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13454 {
13455 RTAssertMsg2Weak(" the FPU state differs\n");
13456 cDiffs++;
13457 CHECK_XSTATE_FIELD(x87.FCW);
13458 CHECK_XSTATE_FIELD(x87.FSW);
13459 CHECK_XSTATE_FIELD(x87.FTW);
13460 CHECK_XSTATE_FIELD(x87.FOP);
13461 CHECK_XSTATE_FIELD(x87.FPUIP);
13462 CHECK_XSTATE_FIELD(x87.CS);
13463 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13464 CHECK_XSTATE_FIELD(x87.FPUDP);
13465 CHECK_XSTATE_FIELD(x87.DS);
13466 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13467 CHECK_XSTATE_FIELD(x87.MXCSR);
13468 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13469 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13470 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13471 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13472 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13473 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13474 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13475 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13476 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13477 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13478 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13479 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13480 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13481 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13482 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13483 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13484 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13485 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13486 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13487 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13488 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13489 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13490 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13491 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13492 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13493 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13494 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13495 }
13496 CHECK_FIELD(rip);
13497 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13498 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13499 {
13500 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13501 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13502 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13503 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13504 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13505 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13506 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13507 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13508 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13509 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13510 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13511 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13512 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13513 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13514 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13515 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13516            if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13517 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13518 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13519 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13520 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13521 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13522 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13523 }
13524
13525 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13526 CHECK_FIELD(rax);
13527 CHECK_FIELD(rcx);
13528 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13529 CHECK_FIELD(rdx);
13530 CHECK_FIELD(rbx);
13531 CHECK_FIELD(rsp);
13532 CHECK_FIELD(rbp);
13533 CHECK_FIELD(rsi);
13534 CHECK_FIELD(rdi);
13535 CHECK_FIELD(r8);
13536 CHECK_FIELD(r9);
13537 CHECK_FIELD(r10);
13538 CHECK_FIELD(r11);
13539 CHECK_FIELD(r12);
13540 CHECK_FIELD(r13);
13541 CHECK_SEL(cs);
13542 CHECK_SEL(ss);
13543 CHECK_SEL(ds);
13544 CHECK_SEL(es);
13545 CHECK_SEL(fs);
13546 CHECK_SEL(gs);
13547 CHECK_FIELD(cr0);
13548
13549        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13550           the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13551        /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13552           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13553 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13554 {
13555 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13556 { /* ignore */ }
13557 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13558 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13559 && fRem)
13560 { /* ignore */ }
13561 else
13562 CHECK_FIELD(cr2);
13563 }
13564 CHECK_FIELD(cr3);
13565 CHECK_FIELD(cr4);
13566 CHECK_FIELD(dr[0]);
13567 CHECK_FIELD(dr[1]);
13568 CHECK_FIELD(dr[2]);
13569 CHECK_FIELD(dr[3]);
13570 CHECK_FIELD(dr[6]);
13571 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13572 CHECK_FIELD(dr[7]);
13573 CHECK_FIELD(gdtr.cbGdt);
13574 CHECK_FIELD(gdtr.pGdt);
13575 CHECK_FIELD(idtr.cbIdt);
13576 CHECK_FIELD(idtr.pIdt);
13577 CHECK_SEL(ldtr);
13578 CHECK_SEL(tr);
13579 CHECK_FIELD(SysEnter.cs);
13580 CHECK_FIELD(SysEnter.eip);
13581 CHECK_FIELD(SysEnter.esp);
13582 CHECK_FIELD(msrEFER);
13583 CHECK_FIELD(msrSTAR);
13584 CHECK_FIELD(msrPAT);
13585 CHECK_FIELD(msrLSTAR);
13586 CHECK_FIELD(msrCSTAR);
13587 CHECK_FIELD(msrSFMASK);
13588 CHECK_FIELD(msrKERNELGSBASE);
13589
13590 if (cDiffs != 0)
13591 {
13592 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13593 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13594 RTAssertPanic();
13595 static bool volatile s_fEnterDebugger = true;
13596 if (s_fEnterDebugger)
13597 DBGFSTOP(pVM);
13598
13599# if 1 /* Ignore unimplemented instructions for now. */
13600 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13601 rcStrictIem = VINF_SUCCESS;
13602# endif
13603 }
13604# undef CHECK_FIELD
13605# undef CHECK_BIT_FIELD
13606 }
13607
13608 /*
13609 * If the register state compared fine, check the verification event
13610 * records.
13611 */
13612 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13613 {
13614 /*
13615         * Compare verification event records.
13616 * - I/O port accesses should be a 1:1 match.
13617 */
13618 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13619 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13620 while (pIemRec && pOtherRec)
13621 {
13622            /* Since we might miss RAM writes and reads on the other side, skip IEM RAM
13623               records that have no counterpart, verifying any writes against guest memory. */
13624 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13625 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13626 && pIemRec->pNext)
13627 {
13628 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13629 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13630 pIemRec = pIemRec->pNext;
13631 }
13632
13633 /* Do the compare. */
13634 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13635 {
13636 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13637 break;
13638 }
13639 bool fEquals;
13640 switch (pIemRec->enmEvent)
13641 {
13642 case IEMVERIFYEVENT_IOPORT_READ:
13643 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13644 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13645 break;
13646 case IEMVERIFYEVENT_IOPORT_WRITE:
13647 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13648 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13649 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13650 break;
13651 case IEMVERIFYEVENT_IOPORT_STR_READ:
13652 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13653 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13654 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13655 break;
13656 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13657 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13658 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13659 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13660 break;
13661 case IEMVERIFYEVENT_RAM_READ:
13662 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13663 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13664 break;
13665 case IEMVERIFYEVENT_RAM_WRITE:
13666 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13667 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13668 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13669 break;
13670 default:
13671 fEquals = false;
13672 break;
13673 }
13674 if (!fEquals)
13675 {
13676 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13677 break;
13678 }
13679
13680 /* advance */
13681 pIemRec = pIemRec->pNext;
13682 pOtherRec = pOtherRec->pNext;
13683 }
13684
13685 /* Ignore extra writes and reads. */
13686 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13687 {
13688 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13689 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13690 pIemRec = pIemRec->pNext;
13691 }
13692 if (pIemRec != NULL)
13693 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13694 else if (pOtherRec != NULL)
13695 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13696 }
13697 IEM_GET_CTX(pVCpu) = pOrgCtx;
13698
13699 return rcStrictIem;
13700}
13701
13702#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13703
13704/* stubs */
13705IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13706{
13707 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13708 return VERR_INTERNAL_ERROR;
13709}
13710
13711IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13712{
13713 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13714 return VERR_INTERNAL_ERROR;
13715}
13716
13717#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13718
13719
13720#ifdef LOG_ENABLED
13721/**
13722 * Logs the current instruction.
13723 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13724 * @param pCtx The current CPU context.
13725 * @param fSameCtx Set if we have the same context information as the VMM,
13726 * clear if we may have already executed an instruction in
13727 * our debug context. When clear, we assume IEMCPU holds
13728 * valid CPU mode info.
13729 */
13730IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13731{
13732# ifdef IN_RING3
13733 if (LogIs2Enabled())
13734 {
13735 char szInstr[256];
13736 uint32_t cbInstr = 0;
13737 if (fSameCtx)
13738 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13739 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13740 szInstr, sizeof(szInstr), &cbInstr);
13741 else
13742 {
13743 uint32_t fFlags = 0;
13744 switch (pVCpu->iem.s.enmCpuMode)
13745 {
13746 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13747 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13748 case IEMMODE_16BIT:
13749 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13750 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13751 else
13752 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13753 break;
13754 }
13755 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13756 szInstr, sizeof(szInstr), &cbInstr);
13757 }
13758
13759 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13760 Log2(("****\n"
13761 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13762 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13763 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13764 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13765 " %s\n"
13766 ,
13767 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13768 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13769 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13770 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13771 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13772 szInstr));
13773
13774 if (LogIs3Enabled())
13775 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13776 }
13777 else
13778# endif
13779 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13780 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13781 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13782}
13783#endif
13784
13785
13786/**
13787 * Makes status code adjustments (pass up from I/O and access handlers)
13788 * as well as maintaining statistics.
13789 *
13790 * @returns Strict VBox status code to pass up.
13791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13792 * @param rcStrict The status from executing an instruction.
13793 */
13794DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13795{
13796 if (rcStrict != VINF_SUCCESS)
13797 {
13798 if (RT_SUCCESS(rcStrict))
13799 {
13800 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13801 || rcStrict == VINF_IOM_R3_IOPORT_READ
13802 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13803 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13804 || rcStrict == VINF_IOM_R3_MMIO_READ
13805 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13806 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13807 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13808 || rcStrict == VINF_CPUM_R3_MSR_READ
13809 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13810 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13811 || rcStrict == VINF_EM_RAW_TO_R3
13812 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13813 /* raw-mode / virt handlers only: */
13814 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13815 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13816 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13817 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13818 || rcStrict == VINF_SELM_SYNC_GDT
13819 || rcStrict == VINF_CSAM_PENDING_ACTION
13820 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13821 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13822/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
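 /* A pending pass-up status takes precedence over the informational rcStrict
    unless the pass-up code is itself an EM scheduling status that does not rank
    higher (i.e. is numerically greater than or equal to rcStrict). */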
13823 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13824 if (rcPassUp == VINF_SUCCESS)
13825 pVCpu->iem.s.cRetInfStatuses++;
13826 else if ( rcPassUp < VINF_EM_FIRST
13827 || rcPassUp > VINF_EM_LAST
13828 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13829 {
13830 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13831 pVCpu->iem.s.cRetPassUpStatus++;
13832 rcStrict = rcPassUp;
13833 }
13834 else
13835 {
13836 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13837 pVCpu->iem.s.cRetInfStatuses++;
13838 }
13839 }
13840 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13841 pVCpu->iem.s.cRetAspectNotImplemented++;
13842 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13843 pVCpu->iem.s.cRetInstrNotImplemented++;
13844#ifdef IEM_VERIFICATION_MODE_FULL
13845 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13846 rcStrict = VINF_SUCCESS;
13847#endif
13848 else
13849 pVCpu->iem.s.cRetErrStatuses++;
13850 }
13851 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13852 {
13853 pVCpu->iem.s.cRetPassUpStatus++;
13854 rcStrict = pVCpu->iem.s.rcPassUp;
13855 }
13856
13857 return rcStrict;
13858}
13859
13860
13861/**
13862 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13863 * IEMExecOneWithPrefetchedByPC.
13864 *
13865 * Similar code is found in IEMExecLots.
13866 *
13867 * @return Strict VBox status code.
13868 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13870 * @param fExecuteInhibit If set, execute the instruction following CLI,
13871 * POP SS and MOV SS,GR.
13872 */
13873DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13874{
13875#ifdef IEM_WITH_SETJMP
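 /* When IEM_WITH_SETJMP is defined, the opcode fetchers and memory helpers may
    report failures by longjmp'ing back to the buffer installed here, with the
    status code as the setjmp return value, instead of returning VBOXSTRICTRC
    through every caller; the else branch below merely counts those long jumps. */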
13876 VBOXSTRICTRC rcStrict;
13877 jmp_buf JmpBuf;
13878 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13879 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13880 if ((rcStrict = setjmp(JmpBuf)) == 0)
13881 {
13882 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13883 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13884 }
13885 else
13886 pVCpu->iem.s.cLongJumps++;
13887 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13888#else
13889 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13890 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13891#endif
13892 if (rcStrict == VINF_SUCCESS)
13893 pVCpu->iem.s.cInstructions++;
13894 if (pVCpu->iem.s.cActiveMappings > 0)
13895 {
13896 Assert(rcStrict != VINF_SUCCESS);
13897 iemMemRollback(pVCpu);
13898 }
13899//#ifdef DEBUG
13900// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13901//#endif
13902
13903 /* Execute the next instruction as well if a cli, pop ss or
13904 mov ss, Gr has just completed successfully. */
13905 if ( fExecuteInhibit
13906 && rcStrict == VINF_SUCCESS
13907 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13908 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13909 {
13910 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13911 if (rcStrict == VINF_SUCCESS)
13912 {
13913#ifdef LOG_ENABLED
13914 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13915#endif
13916#ifdef IEM_WITH_SETJMP
13917 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13918 if ((rcStrict = setjmp(JmpBuf)) == 0)
13919 {
13920 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13921 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13922 }
13923 else
13924 pVCpu->iem.s.cLongJumps++;
13925 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13926#else
13927 IEM_OPCODE_GET_NEXT_U8(&b);
13928 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13929#endif
13930 if (rcStrict == VINF_SUCCESS)
13931 pVCpu->iem.s.cInstructions++;
13932 if (pVCpu->iem.s.cActiveMappings > 0)
13933 {
13934 Assert(rcStrict != VINF_SUCCESS);
13935 iemMemRollback(pVCpu);
13936 }
13937 }
13938 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13939 }
13940
13941 /*
13942 * Return value fiddling, statistics and sanity assertions.
13943 */
13944 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13945
13946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13948#if defined(IEM_VERIFICATION_MODE_FULL)
13949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13951 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13952 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13953#endif
13954 return rcStrict;
13955}
13956
13957
13958#ifdef IN_RC
13959/**
13960 * Re-enters raw-mode or ensure we return to ring-3.
13961 *
13962 * @returns rcStrict, maybe modified.
13963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13964 * @param pCtx The current CPU context.
13965 * @param rcStrict The status code returned by the interpreter.
13966 */
13967DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13968{
13969 if ( !pVCpu->iem.s.fInPatchCode
13970 && ( rcStrict == VINF_SUCCESS
13971 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13972 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13973 {
13974 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13975 CPUMRawEnter(pVCpu);
13976 else
13977 {
13978 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13979 rcStrict = VINF_EM_RESCHEDULE;
13980 }
13981 }
13982 return rcStrict;
13983}
13984#endif
13985
13986
13987/**
13988 * Execute one instruction.
13989 *
13990 * @return Strict VBox status code.
13991 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13992 */
13993VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13994{
13995#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13996 if (++pVCpu->iem.s.cVerifyDepth == 1)
13997 iemExecVerificationModeSetup(pVCpu);
13998#endif
13999#ifdef LOG_ENABLED
14000 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14001 iemLogCurInstr(pVCpu, pCtx, true);
14002#endif
14003
14004 /*
14005 * Do the decoding and emulation.
14006 */
14007 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14008 if (rcStrict == VINF_SUCCESS)
14009 rcStrict = iemExecOneInner(pVCpu, true);
14010
14011#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14012 /*
14013 * Assert some sanity.
14014 */
14015 if (pVCpu->iem.s.cVerifyDepth == 1)
14016 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14017 pVCpu->iem.s.cVerifyDepth--;
14018#endif
14019#ifdef IN_RC
14020 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14021#endif
14022 if (rcStrict != VINF_SUCCESS)
14023 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14024 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14025 return rcStrict;
14026}
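/* Illustrative usage (hypothetical caller, not taken from this file): code running on
   the EMT would typically drive this as
       VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
   and hand any status other than VINF_SUCCESS back to its scheduling / exit handling. */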
14027
14028
14029VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14030{
14031 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14032 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14033
14034 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14035 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14036 if (rcStrict == VINF_SUCCESS)
14037 {
14038 rcStrict = iemExecOneInner(pVCpu, true);
14039 if (pcbWritten)
14040 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14041 }
14042
14043#ifdef IN_RC
14044 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14045#endif
14046 return rcStrict;
14047}
14048
14049
14050VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14051 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14052{
14053 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14054 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14055
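 /* Fast path: if the caller's prefetched bytes are for the current RIP, feed them
    straight to the decoder (code TLB buffer or opcode array) and skip the normal
    opcode prefetch; otherwise fall back to the regular init-and-prefetch route. */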
14056 VBOXSTRICTRC rcStrict;
14057 if ( cbOpcodeBytes
14058 && pCtx->rip == OpcodeBytesPC)
14059 {
14060 iemInitDecoder(pVCpu, false);
14061#ifdef IEM_WITH_CODE_TLB
14062 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14063 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14064 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14065 pVCpu->iem.s.offCurInstrStart = 0;
14066 pVCpu->iem.s.offInstrNextByte = 0;
14067#else
14068 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14069 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14070#endif
14071 rcStrict = VINF_SUCCESS;
14072 }
14073 else
14074 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14075 if (rcStrict == VINF_SUCCESS)
14076 {
14077 rcStrict = iemExecOneInner(pVCpu, true);
14078 }
14079
14080#ifdef IN_RC
14081 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14082#endif
14083 return rcStrict;
14084}
14085
14086
14087VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14088{
14089 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14090 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14091
14092 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14093 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14094 if (rcStrict == VINF_SUCCESS)
14095 {
14096 rcStrict = iemExecOneInner(pVCpu, false);
14097 if (pcbWritten)
14098 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14099 }
14100
14101#ifdef IN_RC
14102 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14103#endif
14104 return rcStrict;
14105}
14106
14107
14108VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14109 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14110{
14111 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14112 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14113
14114 VBOXSTRICTRC rcStrict;
14115 if ( cbOpcodeBytes
14116 && pCtx->rip == OpcodeBytesPC)
14117 {
14118 iemInitDecoder(pVCpu, true);
14119#ifdef IEM_WITH_CODE_TLB
14120 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14121 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14122 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14123 pVCpu->iem.s.offCurInstrStart = 0;
14124 pVCpu->iem.s.offInstrNextByte = 0;
14125#else
14126 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14127 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14128#endif
14129 rcStrict = VINF_SUCCESS;
14130 }
14131 else
14132 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14133 if (rcStrict == VINF_SUCCESS)
14134 rcStrict = iemExecOneInner(pVCpu, false);
14135
14136#ifdef IN_RC
14137 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14138#endif
14139 return rcStrict;
14140}
14141
14142
14143/**
14144 * For debugging DISGetParamSize; may come in handy.
14145 *
14146 * @returns Strict VBox status code.
14147 * @param pVCpu The cross context virtual CPU structure of the
14148 * calling EMT.
14149 * @param pCtxCore The context core structure.
14150 * @param OpcodeBytesPC The PC of the opcode bytes.
14151 * @param pvOpcodeBytes Prefetched opcode bytes.
14152 * @param cbOpcodeBytes Number of prefetched bytes.
14153 * @param pcbWritten Where to return the number of bytes written.
14154 * Optional.
14155 */
14156VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14157 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14158 uint32_t *pcbWritten)
14159{
14160 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14161 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14162
14163 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14164 VBOXSTRICTRC rcStrict;
14165 if ( cbOpcodeBytes
14166 && pCtx->rip == OpcodeBytesPC)
14167 {
14168 iemInitDecoder(pVCpu, true);
14169#ifdef IEM_WITH_CODE_TLB
14170 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14171 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14172 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14173 pVCpu->iem.s.offCurInstrStart = 0;
14174 pVCpu->iem.s.offInstrNextByte = 0;
14175#else
14176 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14177 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14178#endif
14179 rcStrict = VINF_SUCCESS;
14180 }
14181 else
14182 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14183 if (rcStrict == VINF_SUCCESS)
14184 {
14185 rcStrict = iemExecOneInner(pVCpu, false);
14186 if (pcbWritten)
14187 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14188 }
14189
14190#ifdef IN_RC
14191 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14192#endif
14193 return rcStrict;
14194}
14195
14196
14197VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14198{
14199 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14200
14201#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14202 /*
14203 * See if there is an interrupt pending in TRPM, inject it if we can.
14204 */
14205 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14206# ifdef IEM_VERIFICATION_MODE_FULL
14207 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14208# endif
14209 if ( pCtx->eflags.Bits.u1IF
14210 && TRPMHasTrap(pVCpu)
14211 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14212 {
14213 uint8_t u8TrapNo;
14214 TRPMEVENT enmType;
14215 RTGCUINT uErrCode;
14216 RTGCPTR uCr2;
14217 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14218 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14219 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14220 TRPMResetTrap(pVCpu);
14221 }
14222
14223 /*
14224 * Log the state.
14225 */
14226# ifdef LOG_ENABLED
14227 iemLogCurInstr(pVCpu, pCtx, true);
14228# endif
14229
14230 /*
14231 * Do the decoding and emulation.
14232 */
14233 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14234 if (rcStrict == VINF_SUCCESS)
14235 rcStrict = iemExecOneInner(pVCpu, true);
14236
14237 /*
14238 * Assert some sanity.
14239 */
14240 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14241
14242 /*
14243 * Log and return.
14244 */
14245 if (rcStrict != VINF_SUCCESS)
14246 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14247 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14248 if (pcInstructions)
14249 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14250 return rcStrict;
14251
14252#else /* Not verification mode */
14253
14254 /*
14255 * See if there is an interrupt pending in TRPM, inject it if we can.
14256 */
14257 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14258# ifdef IEM_VERIFICATION_MODE_FULL
14259 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14260# endif
14261 if ( pCtx->eflags.Bits.u1IF
14262 && TRPMHasTrap(pVCpu)
14263 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14264 {
14265 uint8_t u8TrapNo;
14266 TRPMEVENT enmType;
14267 RTGCUINT uErrCode;
14268 RTGCPTR uCr2;
14269 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14270 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14271 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14272 TRPMResetTrap(pVCpu);
14273 }
14274
14275 /*
14276 * Initial decoder init w/ prefetch, then setup setjmp.
14277 */
14278 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14279 if (rcStrict == VINF_SUCCESS)
14280 {
14281# ifdef IEM_WITH_SETJMP
14282 jmp_buf JmpBuf;
14283 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14284 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14285 pVCpu->iem.s.cActiveMappings = 0;
14286 if ((rcStrict = setjmp(JmpBuf)) == 0)
14287# endif
14288 {
14289 /*
14290 * The run loop. We limit ourselves to 4096 instructions right now.
14291 */
14292 PVM pVM = pVCpu->CTX_SUFF(pVM);
14293 uint32_t cInstr = 4096;
14294 for (;;)
14295 {
14296 /*
14297 * Log the state.
14298 */
14299# ifdef LOG_ENABLED
14300 iemLogCurInstr(pVCpu, pCtx, true);
14301# endif
14302
14303 /*
14304 * Do the decoding and emulation.
14305 */
14306 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14307 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14308 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14309 {
14310 Assert(pVCpu->iem.s.cActiveMappings == 0);
14311 pVCpu->iem.s.cInstructions++;
14312 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14313 {
14314 uint32_t fCpu = pVCpu->fLocalForcedActions
14315 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14316 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14317 | VMCPU_FF_TLB_FLUSH
14318# ifdef VBOX_WITH_RAW_MODE
14319 | VMCPU_FF_TRPM_SYNC_IDT
14320 | VMCPU_FF_SELM_SYNC_TSS
14321 | VMCPU_FF_SELM_SYNC_GDT
14322 | VMCPU_FF_SELM_SYNC_LDT
14323# endif
14324 | VMCPU_FF_INHIBIT_INTERRUPTS
14325 | VMCPU_FF_BLOCK_NMIS
14326 | VMCPU_FF_UNHALT ));
14327
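 /* Stay in the inner loop only while nothing relevant is pending; the PIC/APIC
    interrupt force-flags are tolerated as long as EFLAGS.IF is clear, since
    such interrupts cannot be delivered anyway. */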
14328 if (RT_LIKELY( ( !fCpu
14329 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14330 && !pCtx->rflags.Bits.u1IF) )
14331 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14332 {
14333 if (cInstr-- > 0)
14334 {
14335 Assert(pVCpu->iem.s.cActiveMappings == 0);
14336 iemReInitDecoder(pVCpu);
14337 continue;
14338 }
14339 }
14340 }
14341 Assert(pVCpu->iem.s.cActiveMappings == 0);
14342 }
14343 else if (pVCpu->iem.s.cActiveMappings > 0)
14344 iemMemRollback(pVCpu);
14345 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14346 break;
14347 }
14348 }
14349# ifdef IEM_WITH_SETJMP
14350 else
14351 {
14352 if (pVCpu->iem.s.cActiveMappings > 0)
14353 iemMemRollback(pVCpu);
14354 pVCpu->iem.s.cLongJumps++;
14355 }
14356 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14357# endif
14358
14359 /*
14360 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14361 */
14362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14364# if defined(IEM_VERIFICATION_MODE_FULL)
14365 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14366 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14367 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14368 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14369# endif
14370 }
14371
14372 /*
14373 * Maybe re-enter raw-mode and log.
14374 */
14375# ifdef IN_RC
14376 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14377# endif
14378 if (rcStrict != VINF_SUCCESS)
14379 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14380 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14381 if (pcInstructions)
14382 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14383 return rcStrict;
14384#endif /* Not verification mode */
14385}
14386
14387
14388
14389/**
14390 * Injects a trap, fault, abort, software interrupt or external interrupt.
14391 *
14392 * The parameter list matches TRPMQueryTrapAll pretty closely.
14393 *
14394 * @returns Strict VBox status code.
14395 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14396 * @param u8TrapNo The trap number.
14397 * @param enmType What type is it (trap/fault/abort), software
14398 * interrupt or hardware interrupt.
14399 * @param uErrCode The error code if applicable.
14400 * @param uCr2 The CR2 value if applicable.
14401 * @param cbInstr The instruction length (only relevant for
14402 * software interrupts).
14403 */
14404VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14405 uint8_t cbInstr)
14406{
14407 iemInitDecoder(pVCpu, false);
14408#ifdef DBGFTRACE_ENABLED
14409 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14410 u8TrapNo, enmType, uErrCode, uCr2);
14411#endif
14412
14413 uint32_t fFlags;
14414 switch (enmType)
14415 {
14416 case TRPM_HARDWARE_INT:
14417 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14418 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14419 uErrCode = uCr2 = 0;
14420 break;
14421
14422 case TRPM_SOFTWARE_INT:
14423 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14424 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14425 uErrCode = uCr2 = 0;
14426 break;
14427
14428 case TRPM_TRAP:
14429 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14430 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14431 if (u8TrapNo == X86_XCPT_PF)
14432 fFlags |= IEM_XCPT_FLAGS_CR2;
14433 switch (u8TrapNo)
14434 {
14435 case X86_XCPT_DF:
14436 case X86_XCPT_TS:
14437 case X86_XCPT_NP:
14438 case X86_XCPT_SS:
14439 case X86_XCPT_PF:
14440 case X86_XCPT_AC:
14441 fFlags |= IEM_XCPT_FLAGS_ERR;
14442 break;
14443
14444 case X86_XCPT_NMI:
14445 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14446 break;
14447 }
14448 break;
14449
14450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14451 }
14452
14453 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14454}
14455
14456
14457/**
14458 * Injects the active TRPM event.
14459 *
14460 * @returns Strict VBox status code.
14461 * @param pVCpu The cross context virtual CPU structure.
14462 */
14463VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14464{
14465#ifndef IEM_IMPLEMENTS_TASKSWITCH
14466 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14467#else
14468 uint8_t u8TrapNo;
14469 TRPMEVENT enmType;
14470 RTGCUINT uErrCode;
14471 RTGCUINTPTR uCr2;
14472 uint8_t cbInstr;
14473 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14474 if (RT_FAILURE(rc))
14475 return rc;
14476
14477 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14478
14479 /** @todo Are there any other codes that imply the event was successfully
14480 * delivered to the guest? See @bugref{6607}. */
14481 if ( rcStrict == VINF_SUCCESS
14482 || rcStrict == VINF_IEM_RAISED_XCPT)
14483 {
14484 TRPMResetTrap(pVCpu);
14485 }
14486 return rcStrict;
14487#endif
14488}
14489
14490
14491VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14492{
14493 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14494 return VERR_NOT_IMPLEMENTED;
14495}
14496
14497
14498VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14499{
14500 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14501 return VERR_NOT_IMPLEMENTED;
14502}
14503
14504
14505#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14506/**
14507 * Executes a IRET instruction with default operand size.
14508 *
14509 * This is for PATM.
14510 *
14511 * @returns VBox status code.
14512 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14513 * @param pCtxCore The register frame.
14514 */
14515VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14516{
14517 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14518
14519 iemCtxCoreToCtx(pCtx, pCtxCore);
14520 iemInitDecoder(pVCpu);
14521 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14522 if (rcStrict == VINF_SUCCESS)
14523 iemCtxToCtxCore(pCtxCore, pCtx);
14524 else
14525 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14526 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14527 return rcStrict;
14528}
14529#endif
14530
14531
14532/**
14533 * Macro used by the IEMExec* method to check the given instruction length.
14534 *
14535 * Will return on failure!
14536 *
14537 * @param a_cbInstr The given instruction length.
14538 * @param a_cbMin The minimum length.
14539 */
14540#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14541 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14542 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
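/* The single unsigned compare above is a branchless range check: if a_cbInstr is
   smaller than a_cbMin the subtraction wraps to a huge value and the test fails,
   so the macro effectively asserts a_cbMin <= a_cbInstr <= 15 (the architectural
   maximum x86 instruction length). E.g. with a_cbMin=1: 0 wraps and fails,
   1 through 15 pass, 16 fails. */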
14543
14544
14545/**
14546 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14547 *
14548 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14549 *
14550 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14552 * @param rcStrict The status code to fiddle.
14553 */
14554DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14555{
14556 iemUninitExec(pVCpu);
14557#ifdef IN_RC
14558 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14559 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14560#else
14561 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14562#endif
14563}
14564
14565
14566/**
14567 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14568 *
14569 * This API ASSUMES that the caller has already verified that the guest code is
14570 * allowed to access the I/O port. (The I/O port is in the DX register in the
14571 * guest state.)
14572 *
14573 * @returns Strict VBox status code.
14574 * @param pVCpu The cross context virtual CPU structure.
14575 * @param cbValue The size of the I/O port access (1, 2, or 4).
14576 * @param enmAddrMode The addressing mode.
14577 * @param fRepPrefix Indicates whether a repeat prefix is used
14578 * (doesn't matter which for this instruction).
14579 * @param cbInstr The instruction length in bytes.
14580 * @param iEffSeg The effective segment address.
14581 * @param fIoChecked Whether the access to the I/O port has been
14582 * checked or not. It's typically checked in the
14583 * HM scenario.
14584 */
14585VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14586 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14587{
14588 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14589 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14590
14591 /*
14592 * State init.
14593 */
14594 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14595
14596 /*
14597 * Switch orgy for getting to the right handler.
14598 */
14599 VBOXSTRICTRC rcStrict;
14600 if (fRepPrefix)
14601 {
14602 switch (enmAddrMode)
14603 {
14604 case IEMMODE_16BIT:
14605 switch (cbValue)
14606 {
14607 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14608 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14609 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14610 default:
14611 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14612 }
14613 break;
14614
14615 case IEMMODE_32BIT:
14616 switch (cbValue)
14617 {
14618 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14619 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14620 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14621 default:
14622 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14623 }
14624 break;
14625
14626 case IEMMODE_64BIT:
14627 switch (cbValue)
14628 {
14629 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14630 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14631 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14632 default:
14633 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14634 }
14635 break;
14636
14637 default:
14638 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14639 }
14640 }
14641 else
14642 {
14643 switch (enmAddrMode)
14644 {
14645 case IEMMODE_16BIT:
14646 switch (cbValue)
14647 {
14648 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14649 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14650 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14651 default:
14652 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14653 }
14654 break;
14655
14656 case IEMMODE_32BIT:
14657 switch (cbValue)
14658 {
14659 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14660 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14661 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14662 default:
14663 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14664 }
14665 break;
14666
14667 case IEMMODE_64BIT:
14668 switch (cbValue)
14669 {
14670 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14671 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14672 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14673 default:
14674 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14675 }
14676 break;
14677
14678 default:
14679 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14680 }
14681 }
14682
14683 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14684}
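/* Illustrative usage (hypothetical caller, not taken from this file): an HM exit handler
   for a string OUT that has already validated I/O port access might call
       rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRepPrefix,
                                       cbInstr, iEffSeg, true /*fIoChecked*/);
   leaving the segment/paging checks and the actual port writes to IEM. */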
14685
14686
14687/**
14688 * Interface for HM and EM for executing string I/O IN (read) instructions.
14689 *
14690 * This API ASSUMES that the caller has already verified that the guest code is
14691 * allowed to access the I/O port. (The I/O port is in the DX register in the
14692 * guest state.)
14693 *
14694 * @returns Strict VBox status code.
14695 * @param pVCpu The cross context virtual CPU structure.
14696 * @param cbValue The size of the I/O port access (1, 2, or 4).
14697 * @param enmAddrMode The addressing mode.
14698 * @param fRepPrefix Indicates whether a repeat prefix is used
14699 * (doesn't matter which for this instruction).
14700 * @param cbInstr The instruction length in bytes.
14701 * @param fIoChecked Whether the access to the I/O port has been
14702 * checked or not. It's typically checked in the
14703 * HM scenario.
14704 */
14705VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14706 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14707{
14708 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14709
14710 /*
14711 * State init.
14712 */
14713 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14714
14715 /*
14716 * Switch orgy for getting to the right handler.
14717 */
14718 VBOXSTRICTRC rcStrict;
14719 if (fRepPrefix)
14720 {
14721 switch (enmAddrMode)
14722 {
14723 case IEMMODE_16BIT:
14724 switch (cbValue)
14725 {
14726 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14727 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14728 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14729 default:
14730 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14731 }
14732 break;
14733
14734 case IEMMODE_32BIT:
14735 switch (cbValue)
14736 {
14737 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14738 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14739 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14740 default:
14741 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14742 }
14743 break;
14744
14745 case IEMMODE_64BIT:
14746 switch (cbValue)
14747 {
14748 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14749 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14750 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14751 default:
14752 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14753 }
14754 break;
14755
14756 default:
14757 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14758 }
14759 }
14760 else
14761 {
14762 switch (enmAddrMode)
14763 {
14764 case IEMMODE_16BIT:
14765 switch (cbValue)
14766 {
14767 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14768 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14769 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14770 default:
14771 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14772 }
14773 break;
14774
14775 case IEMMODE_32BIT:
14776 switch (cbValue)
14777 {
14778 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14779 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14780 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14781 default:
14782 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14783 }
14784 break;
14785
14786 case IEMMODE_64BIT:
14787 switch (cbValue)
14788 {
14789 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14790 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14791 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14792 default:
14793 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14794 }
14795 break;
14796
14797 default:
14798 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14799 }
14800 }
14801
14802 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14803}
14804
14805
14806/**
14807 * Interface for raw-mode to execute an OUT instruction.
14808 *
14809 * @returns Strict VBox status code.
14810 * @param pVCpu The cross context virtual CPU structure.
14811 * @param cbInstr The instruction length in bytes.
14812 * @param u16Port The port to write to.
14813 * @param cbReg The register size.
14814 *
14815 * @remarks In ring-0 not all of the state needs to be synced in.
14816 */
14817VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14818{
14819 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14820 Assert(cbReg <= 4 && cbReg != 3);
14821
14822 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14823 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14824 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14825}
14826
14827
14828/**
14829 * Interface for raw-mode to execute an IN instruction.
14830 *
14831 * @returns Strict VBox status code.
14832 * @param pVCpu The cross context virtual CPU structure.
14833 * @param cbInstr The instruction length in bytes.
14834 * @param u16Port The port to read.
14835 * @param cbReg The register size.
14836 */
14837VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14838{
14839 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14840 Assert(cbReg <= 4 && cbReg != 3);
14841
14842 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14843 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14844 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14845}
14846
14847
14848/**
14849 * Interface for HM and EM to write to a CRx register.
14850 *
14851 * @returns Strict VBox status code.
14852 * @param pVCpu The cross context virtual CPU structure.
14853 * @param cbInstr The instruction length in bytes.
14854 * @param iCrReg The control register number (destination).
14855 * @param iGReg The general purpose register number (source).
14856 *
14857 * @remarks In ring-0 not all of the state needs to be synced in.
14858 */
14859VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14860{
14861 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14862 Assert(iCrReg < 16);
14863 Assert(iGReg < 16);
14864
14865 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14866 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14867 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14868}
14869
14870
14871/**
14872 * Interface for HM and EM to read from a CRx register.
14873 *
14874 * @returns Strict VBox status code.
14875 * @param pVCpu The cross context virtual CPU structure.
14876 * @param cbInstr The instruction length in bytes.
14877 * @param iGReg The general purpose register number (destination).
14878 * @param iCrReg The control register number (source).
14879 *
14880 * @remarks In ring-0 not all of the state needs to be synced in.
14881 */
14882VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14883{
14884 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14885 Assert(iCrReg < 16);
14886 Assert(iGReg < 16);
14887
14888 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14889 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14890 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14891}
14892
14893
14894/**
14895 * Interface for HM and EM to clear the CR0[TS] bit.
14896 *
14897 * @returns Strict VBox status code.
14898 * @param pVCpu The cross context virtual CPU structure.
14899 * @param cbInstr The instruction length in bytes.
14900 *
14901 * @remarks In ring-0 not all of the state needs to be synced in.
14902 */
14903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14904{
14905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14906
14907 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14908 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14909 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14910}
14911
14912
14913/**
14914 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14915 *
14916 * @returns Strict VBox status code.
14917 * @param pVCpu The cross context virtual CPU structure.
14918 * @param cbInstr The instruction length in bytes.
14919 * @param uValue The value to load into CR0.
14920 *
14921 * @remarks In ring-0 not all of the state needs to be synced in.
14922 */
14923VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14924{
14925 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14926
14927 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14928 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14929 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14930}
14931
14932
14933/**
14934 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14935 *
14936 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14937 *
14938 * @returns Strict VBox status code.
14939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14940 * @param cbInstr The instruction length in bytes.
14941 * @remarks In ring-0 not all of the state needs to be synced in.
14942 * @thread EMT(pVCpu)
14943 */
14944VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14945{
14946 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14947
14948 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14949 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14950 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14951}
14952
14953
14954#ifdef VBOX_WITH_NESTED_HWVIRT
14955/**
14956 * Checks if IEM is in the process of delivering an event (interrupt or
14957 * exception).
14958 *
14959 * @returns true if it's raising an interrupt or exception, false otherwise.
14960 * @param pVCpu The cross context virtual CPU structure.
14961 */
14962VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
14963{
14964 return pVCpu->iem.s.cXcptRecursions > 0;
14965}
14966
14967
14968/**
14969 * Interface for HM and EM to emulate the CLGI instruction.
14970 *
14971 * @returns Strict VBox status code.
14972 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14973 * @param cbInstr The instruction length in bytes.
14974 * @thread EMT(pVCpu)
14975 */
14976VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
14977{
14978 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14979
14980 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14981 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
14982 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14983}
14984
14985
14986/**
14987 * Interface for HM and EM to emulate the STGI instruction.
14988 *
14989 * @returns Strict VBox status code.
14990 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14991 * @param cbInstr The instruction length in bytes.
14992 * @thread EMT(pVCpu)
14993 */
14994VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
14995{
14996 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14997
14998 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14999 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15000 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15001}
15002
15003
15004/**
15005 * Interface for HM and EM to emulate the VMLOAD instruction.
15006 *
15007 * @returns Strict VBox status code.
15008 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15009 * @param cbInstr The instruction length in bytes.
15010 * @thread EMT(pVCpu)
15011 */
15012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15013{
15014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15015
15016 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15017 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15018 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15019}
15020
15021
15022/**
15023 * Interface for HM and EM to emulate the VMSAVE instruction.
15024 *
15025 * @returns Strict VBox status code.
15026 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15027 * @param cbInstr The instruction length in bytes.
15028 * @thread EMT(pVCpu)
15029 */
15030VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15031{
15032 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15033
15034 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15035 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15036 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15037}
15038
15039
15040/**
15041 * Interface for HM and EM to emulate the INVLPGA instruction.
15042 *
15043 * @returns Strict VBox status code.
15044 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15045 * @param cbInstr The instruction length in bytes.
15046 * @thread EMT(pVCpu)
15047 */
15048VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15049{
15050 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15051
15052 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15053 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15054 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15055}
15056#endif /* VBOX_WITH_NESTED_HWVIRT */
15057
15058#ifdef IN_RING3
15059
15060/**
15061 * Handles the unlikely and probably fatal merge cases.
15062 *
15063 * @returns Merged status code.
15064 * @param rcStrict Current EM status code.
15065 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15066 * with @a rcStrict.
15067 * @param iMemMap The memory mapping index. For error reporting only.
15068 * @param pVCpu The cross context virtual CPU structure of the calling
15069 * thread, for error reporting only.
15070 */
15071DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15072 unsigned iMemMap, PVMCPU pVCpu)
15073{
15074 if (RT_FAILURE_NP(rcStrict))
15075 return rcStrict;
15076
15077 if (RT_FAILURE_NP(rcStrictCommit))
15078 return rcStrictCommit;
15079
15080 if (rcStrict == rcStrictCommit)
15081 return rcStrictCommit;
15082
15083 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15084 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15085 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15086 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15087 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15088 return VERR_IOM_FF_STATUS_IPE;
15089}
15090
15091
15092/**
15093 * Helper for IOMR3ProcessForceFlag.
15094 *
15095 * @returns Merged status code.
15096 * @param rcStrict Current EM status code.
15097 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15098 * with @a rcStrict.
15099 * @param iMemMap The memory mapping index. For error reporting only.
15100 * @param pVCpu The cross context virtual CPU structure of the calling
15101 * thread, for error reporting only.
15102 */
15103DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15104{
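 /* Merge priority: a benign rcStrict (VINF_SUCCESS or VINF_EM_RAW_TO_R3) yields to
    the commit status, a VINF_SUCCESS commit status keeps rcStrict, and two EM
    scheduling statuses merge to the numerically lower of the two; everything else,
    including real failures, is handled by iemR3MergeStatusSlow. */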
15105 /* Simple. */
15106 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15107 return rcStrictCommit;
15108
15109 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15110 return rcStrict;
15111
15112 /* EM scheduling status codes. */
15113 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15114 && rcStrict <= VINF_EM_LAST))
15115 {
15116 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15117 && rcStrictCommit <= VINF_EM_LAST))
15118 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15119 }
15120
15121 /* Unlikely */
15122 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15123}
15124
15125
15126/**
15127 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15128 *
15129 * @returns Merge between @a rcStrict and what the commit operation returned.
15130 * @param pVM The cross context VM structure.
15131 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15132 * @param rcStrict The status code returned by ring-0 or raw-mode.
15133 */
15134VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15135{
15136 /*
15137 * Reset the pending commit.
15138 */
15139 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15140 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15141 ("%#x %#x %#x\n",
15142 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15143 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15144
15145 /*
15146 * Commit the pending bounce buffers (usually just one).
15147 */
15148 unsigned cBufs = 0;
15149 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15150 while (iMemMap-- > 0)
15151 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15152 {
15153 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15154 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15155 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15156
15157 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15158 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15159 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15160
15161 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15162 {
15163 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15164 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15165 pbBuf,
15166 cbFirst,
15167 PGMACCESSORIGIN_IEM);
15168 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15169 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15170 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15171 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15172 }
15173
15174 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15175 {
15176 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15177 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15178 pbBuf + cbFirst,
15179 cbSecond,
15180 PGMACCESSORIGIN_IEM);
15181 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15182 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15183 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15184 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15185 }
15186 cBufs++;
15187 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15188 }
15189
15190 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15191 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15192 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15193 pVCpu->iem.s.cActiveMappings = 0;
15194 return rcStrict;
15195}
15196
15197#endif /* IN_RING3 */
15198