VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@62189

Last change on this file since 62189 was 62189, checked in by vboxsync, 8 years ago

IEM: A little bit more on the code TLB.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 567.8 KB
1/* $Id: IEMAll.cpp 62189 2016-07-12 12:05:16Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84//#define IEM_WITH_CODE_TLB - work in progress
85
86
87/*********************************************************************************************************************************
88* Header Files *
89*********************************************************************************************************************************/
90#define LOG_GROUP LOG_GROUP_IEM
91#define VMCPU_INCL_CPUM_GST_CTX
92#include <VBox/vmm/iem.h>
93#include <VBox/vmm/cpum.h>
94#include <VBox/vmm/pdm.h>
95#include <VBox/vmm/pgm.h>
96#include <internal/pgm.h>
97#include <VBox/vmm/iom.h>
98#include <VBox/vmm/em.h>
99#include <VBox/vmm/hm.h>
100#include <VBox/vmm/tm.h>
101#include <VBox/vmm/dbgf.h>
102#include <VBox/vmm/dbgftrace.h>
103#ifdef VBOX_WITH_RAW_MODE_NOT_R0
104# include <VBox/vmm/patm.h>
105# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
106# include <VBox/vmm/csam.h>
107# endif
108#endif
109#include "IEMInternal.h"
110#ifdef IEM_VERIFICATION_MODE_FULL
111# include <VBox/vmm/rem.h>
112# include <VBox/vmm/mm.h>
113#endif
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
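/* Illustrative sketch only (not built): how a decoder is typically declared with
 * FNIEMOP_DEF and dispatched with FNIEMOP_CALL. The function name and the choice
 * of bailing out via IEM_RETURN_ASPECT_NOT_IMPLEMENTED are made up for illustration.
 *
 *     FNIEMOP_DEF(iemOp_ExampleStub)
 *     {
 *         // A real decoder fetches any remaining opcode bytes and emits the
 *         // operation; an unimplemented aspect would bail out like this:
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *     }
 *
 *     // Dispatching on the first opcode byte 'b':
 *     //     return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */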
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211
212/*********************************************************************************************************************************
213* Defined Constants And Macros *
214*********************************************************************************************************************************/
215/** @def IEM_WITH_SETJMP
216 * Enables alternative status code handling using setjmps.
217 *
218 * This adds a bit of expense via the setjmp() call since it saves all the
219 * non-volatile registers. However, it eliminates return code checks and allows
220 * for more optimal return value passing (return regs instead of stack buffer).
221 */
222#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
223# define IEM_WITH_SETJMP
224#endif
225
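/* Illustrative sketch only (not built): what IEM_WITH_SETJMP changes for callers of
 * the opcode fetch helpers. The helper names follow the U8 fetch pattern used later
 * in this file, but treat the snippet as schematic rather than exact.
 *
 *     // Status code mode: every helper returns a VBOXSTRICTRC that must be checked.
 *     uint8_t b;
 *     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &b);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // Setjmp mode: the helper returns the byte directly and longjmps back to the
 *     // setjmp frame established around instruction execution on failure.
 *     uint8_t b = iemOpcodeGetNextU8Jmp(pVCpu);
 */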
226/** Temporary hack to disable the double execution. Will be removed in favor
227 * of a dedicated execution mode in EM. */
228//#define IEM_VERIFICATION_MODE_NO_REM
229
230/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
231 * due to GCC lacking knowledge about the value range of a switch. */
232#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
233
234/**
235 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
236 * occasion.
237 */
238#ifdef LOG_ENABLED
239# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
240 do { \
241 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
242 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
243 } while (0)
244#else
245# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
246 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
247#endif
248
249/**
250 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
251 * occasion using the supplied logger statement.
252 *
253 * @param a_LoggerArgs What to log on failure.
254 */
255#ifdef LOG_ENABLED
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
257 do { \
258 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
259 /*LogFunc(a_LoggerArgs);*/ \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
261 } while (0)
262#else
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
265#endif
266
267/**
268 * Call an opcode decoder function.
269 *
270 * We're using macros for this so that adding and removing parameters can be
271 * done as we please. See FNIEMOP_DEF.
272 */
273#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
274
275/**
276 * Call a common opcode decoder function taking one extra argument.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF_1.
280 */
281#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
282
283/**
284 * Call a common opcode decoder function taking two extra arguments.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_2.
288 */
289#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
290
291/**
292 * Check if we're currently executing in real or virtual 8086 mode.
293 *
294 * @returns @c true if it is, @c false if not.
295 * @param a_pVCpu The IEM state of the current CPU.
296 */
297#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
298
299/**
300 * Check if we're currently executing in virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
304 */
305#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in long mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in real mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
325 * @returns PCCPUMFEATURES
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
336
337/**
338 * Evaluates to true if we're presenting an Intel CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
341
342/**
343 * Evaluates to true if we're presenting an AMD CPU to the guest.
344 */
345#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
346
347/**
348 * Check if the address is canonical.
349 */
350#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
351
352/** @def IEM_USE_UNALIGNED_DATA_ACCESS
353 * Use unaligned accesses instead of elaborate byte assembly. */
354#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
355# define IEM_USE_UNALIGNED_DATA_ACCESS
356#endif
357
358
359/*********************************************************************************************************************************
360* Global Variables *
361*********************************************************************************************************************************/
362extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
363
364
365/** Function table for the ADD instruction. */
366IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
367{
368 iemAImpl_add_u8, iemAImpl_add_u8_locked,
369 iemAImpl_add_u16, iemAImpl_add_u16_locked,
370 iemAImpl_add_u32, iemAImpl_add_u32_locked,
371 iemAImpl_add_u64, iemAImpl_add_u64_locked
372};
373
374/** Function table for the ADC instruction. */
375IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
376{
377 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
378 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
379 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
380 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
381};
382
383/** Function table for the SUB instruction. */
384IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
385{
386 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
387 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
388 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
389 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
390};
391
392/** Function table for the SBB instruction. */
393IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
394{
395 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
396 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
397 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
398 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
399};
400
401/** Function table for the OR instruction. */
402IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
403{
404 iemAImpl_or_u8, iemAImpl_or_u8_locked,
405 iemAImpl_or_u16, iemAImpl_or_u16_locked,
406 iemAImpl_or_u32, iemAImpl_or_u32_locked,
407 iemAImpl_or_u64, iemAImpl_or_u64_locked
408};
409
410/** Function table for the XOR instruction. */
411IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
412{
413 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
414 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
415 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
416 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
417};
418
419/** Function table for the AND instruction. */
420IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
421{
422 iemAImpl_and_u8, iemAImpl_and_u8_locked,
423 iemAImpl_and_u16, iemAImpl_and_u16_locked,
424 iemAImpl_and_u32, iemAImpl_and_u32_locked,
425 iemAImpl_and_u64, iemAImpl_and_u64_locked
426};
427
428/** Function table for the CMP instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
432{
433 iemAImpl_cmp_u8, NULL,
434 iemAImpl_cmp_u16, NULL,
435 iemAImpl_cmp_u32, NULL,
436 iemAImpl_cmp_u64, NULL
437};
438
439/** Function table for the TEST instruction.
440 * @remarks Making operand order ASSUMPTIONS.
441 */
442IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
443{
444 iemAImpl_test_u8, NULL,
445 iemAImpl_test_u16, NULL,
446 iemAImpl_test_u32, NULL,
447 iemAImpl_test_u64, NULL
448};
449
450/** Function table for the BT instruction. */
451IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
452{
453 NULL, NULL,
454 iemAImpl_bt_u16, NULL,
455 iemAImpl_bt_u32, NULL,
456 iemAImpl_bt_u64, NULL
457};
458
459/** Function table for the BTC instruction. */
460IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
461{
462 NULL, NULL,
463 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
464 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
465 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
466};
467
468/** Function table for the BTR instruction. */
469IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
470{
471 NULL, NULL,
472 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
473 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
474 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
475};
476
477/** Function table for the BTS instruction. */
478IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
479{
480 NULL, NULL,
481 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
482 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
483 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
484};
485
486/** Function table for the BSF instruction. */
487IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
488{
489 NULL, NULL,
490 iemAImpl_bsf_u16, NULL,
491 iemAImpl_bsf_u32, NULL,
492 iemAImpl_bsf_u64, NULL
493};
494
495/** Function table for the BSR instruction. */
496IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
497{
498 NULL, NULL,
499 iemAImpl_bsr_u16, NULL,
500 iemAImpl_bsr_u32, NULL,
501 iemAImpl_bsr_u64, NULL
502};
503
504/** Function table for the IMUL instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
506{
507 NULL, NULL,
508 iemAImpl_imul_two_u16, NULL,
509 iemAImpl_imul_two_u32, NULL,
510 iemAImpl_imul_two_u64, NULL
511};
512
513/** Group 1 /r lookup table. */
514IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
515{
516 &g_iemAImpl_add,
517 &g_iemAImpl_or,
518 &g_iemAImpl_adc,
519 &g_iemAImpl_sbb,
520 &g_iemAImpl_and,
521 &g_iemAImpl_sub,
522 &g_iemAImpl_xor,
523 &g_iemAImpl_cmp
524};
525
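/* Note (illustrative, not built): g_apIemImplGrp1 above is indexed by the reg/opcode
 * field of the ModR/M byte, so /0 picks ADD, /5 picks SUB, /7 picks CMP, and so on.
 * A decoder would select the implementation roughly like this, assuming the usual
 * X86_MODRM_REG_* definitions from iprt/x86.h:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */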
526/** Function table for the INC instruction. */
527IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
528{
529 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
530 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
531 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
532 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
533};
534
535/** Function table for the DEC instruction. */
536IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
537{
538 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
539 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
540 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
541 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
542};
543
544/** Function table for the NEG instruction. */
545IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
546{
547 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
548 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
549 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
550 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
551};
552
553/** Function table for the NOT instruction. */
554IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
555{
556 iemAImpl_not_u8, iemAImpl_not_u8_locked,
557 iemAImpl_not_u16, iemAImpl_not_u16_locked,
558 iemAImpl_not_u32, iemAImpl_not_u32_locked,
559 iemAImpl_not_u64, iemAImpl_not_u64_locked
560};
561
562
563/** Function table for the ROL instruction. */
564IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
565{
566 iemAImpl_rol_u8,
567 iemAImpl_rol_u16,
568 iemAImpl_rol_u32,
569 iemAImpl_rol_u64
570};
571
572/** Function table for the ROR instruction. */
573IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
574{
575 iemAImpl_ror_u8,
576 iemAImpl_ror_u16,
577 iemAImpl_ror_u32,
578 iemAImpl_ror_u64
579};
580
581/** Function table for the RCL instruction. */
582IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
583{
584 iemAImpl_rcl_u8,
585 iemAImpl_rcl_u16,
586 iemAImpl_rcl_u32,
587 iemAImpl_rcl_u64
588};
589
590/** Function table for the RCR instruction. */
591IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
592{
593 iemAImpl_rcr_u8,
594 iemAImpl_rcr_u16,
595 iemAImpl_rcr_u32,
596 iemAImpl_rcr_u64
597};
598
599/** Function table for the SHL instruction. */
600IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
601{
602 iemAImpl_shl_u8,
603 iemAImpl_shl_u16,
604 iemAImpl_shl_u32,
605 iemAImpl_shl_u64
606};
607
608/** Function table for the SHR instruction. */
609IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
610{
611 iemAImpl_shr_u8,
612 iemAImpl_shr_u16,
613 iemAImpl_shr_u32,
614 iemAImpl_shr_u64
615};
616
617/** Function table for the SAR instruction. */
618IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
619{
620 iemAImpl_sar_u8,
621 iemAImpl_sar_u16,
622 iemAImpl_sar_u32,
623 iemAImpl_sar_u64
624};
625
626
627/** Function table for the MUL instruction. */
628IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
629{
630 iemAImpl_mul_u8,
631 iemAImpl_mul_u16,
632 iemAImpl_mul_u32,
633 iemAImpl_mul_u64
634};
635
636/** Function table for the IMUL instruction working implicitly on rAX. */
637IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
638{
639 iemAImpl_imul_u8,
640 iemAImpl_imul_u16,
641 iemAImpl_imul_u32,
642 iemAImpl_imul_u64
643};
644
645/** Function table for the DIV instruction. */
646IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
647{
648 iemAImpl_div_u8,
649 iemAImpl_div_u16,
650 iemAImpl_div_u32,
651 iemAImpl_div_u64
652};
653
654/** Function table for the IDIV instruction. */
655IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
656{
657 iemAImpl_idiv_u8,
658 iemAImpl_idiv_u16,
659 iemAImpl_idiv_u32,
660 iemAImpl_idiv_u64
661};
662
663/** Function table for the SHLD instruction */
664IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
665{
666 iemAImpl_shld_u16,
667 iemAImpl_shld_u32,
668 iemAImpl_shld_u64,
669};
670
671/** Function table for the SHRD instruction */
672IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
673{
674 iemAImpl_shrd_u16,
675 iemAImpl_shrd_u32,
676 iemAImpl_shrd_u64,
677};
678
679
680/** Function table for the PUNPCKLBW instruction */
681IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
682/** Function table for the PUNPCKLWD instruction */
683IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
684/** Function table for the PUNPCKLDQ instruction */
685IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
686/** Function table for the PUNPCKLQDQ instruction */
687IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
688
689/** Function table for the PUNPCKHBW instruction */
690IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
691/** Function table for the PUNPCKHWD instruction */
692IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
693/** Function table for the PUNPCKHDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
695/** Function table for the PUNPCKHQDQ instruction */
696IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
697
698/** Function table for the PXOR instruction */
699IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
700/** Function table for the PCMPEQB instruction */
701IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
702/** Function table for the PCMPEQW instruction */
703IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
704/** Function table for the PCMPEQD instruction */
705IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
706
707
708#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
709/** What IEM just wrote. */
710uint8_t g_abIemWrote[256];
711/** How much IEM just wrote. */
712size_t g_cbIemWrote;
713#endif
714
715
716/*********************************************************************************************************************************
717* Internal Functions *
718*********************************************************************************************************************************/
719IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
720IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
721IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
722IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
723/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
724IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
725IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
726IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
727IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
728IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
729IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
730IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
733IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
734IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
735IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
736#ifdef IEM_WITH_SETJMP
737DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
740DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741#endif
742
743IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
744IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
745IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
747IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
748IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
750IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
751IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
752IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
753IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
754IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
755IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
756IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
757IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
758IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
759
760#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
761IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
762#endif
763IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
764IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
765
766
767
768/**
769 * Sets the pass up status.
770 *
771 * @returns VINF_SUCCESS.
772 * @param pVCpu The cross context virtual CPU structure of the
773 * calling thread.
774 * @param rcPassUp The pass up status. Must be informational.
775 * VINF_SUCCESS is not allowed.
776 */
777IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
778{
779 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
780
781 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
782 if (rcOldPassUp == VINF_SUCCESS)
783 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
784 /* If both are EM scheduling codes, use EM priority rules. */
785 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
786 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
787 {
788 if (rcPassUp < rcOldPassUp)
789 {
790 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 }
793 else
794 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
795 }
796 /* Override EM scheduling with specific status code. */
797 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
798 {
799 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
800 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
801 }
802 /* Don't override specific status code, first come first served. */
803 else
804 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
805 return VINF_SUCCESS;
806}
807
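/* Example of the rules above (comment only): while an EM scheduling status such as
 * VINF_EM_RESCHEDULE is pending, a lower-valued (higher priority) EM status replaces
 * it; any other informational status overrides a pending EM scheduling status; and
 * once a non-EM status has been recorded, later candidates are ignored (first come,
 * first served). */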
808
809/**
810 * Calculates the CPU mode.
811 *
812 * This is mainly for updating IEMCPU::enmCpuMode.
813 *
814 * @returns CPU mode.
815 * @param pCtx The register context for the CPU.
816 */
817DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
818{
819 if (CPUMIsGuestIn64BitCodeEx(pCtx))
820 return IEMMODE_64BIT;
821 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
822 return IEMMODE_32BIT;
823 return IEMMODE_16BIT;
824}
825
826
827/**
828 * Initializes the execution state.
829 *
830 * @param pVCpu The cross context virtual CPU structure of the
831 * calling thread.
832 * @param fBypassHandlers Whether to bypass access handlers.
833 *
834 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
835 * side-effects in strict builds.
836 */
837DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
838{
839 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
840
841 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
842
843#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
852#endif
853
854#ifdef VBOX_WITH_RAW_MODE_NOT_R0
855 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
856#endif
857 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
858 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
859#ifdef VBOX_STRICT
860 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
861 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
862 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
863 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
864 pVCpu->iem.s.fPrefixes = (IEMMODE)0xfeedbeef;
865 pVCpu->iem.s.uRexReg = 127;
866 pVCpu->iem.s.uRexB = 127;
867 pVCpu->iem.s.uRexIndex = 127;
868 pVCpu->iem.s.iEffSeg = 127;
869 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
870# ifdef IEM_WITH_CODE_TLB
871 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
872 pVCpu->iem.s.pbInstrBuf = NULL;
873 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
874 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
875 pVCpu->iem.s.offCurInstrStart = UINT16_MAX;
876 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
877# else
878 pVCpu->iem.s.offOpcode = 127;
879 pVCpu->iem.s.cbOpcode = 127;
880# endif
881#endif
882
883 pVCpu->iem.s.cActiveMappings = 0;
884 pVCpu->iem.s.iNextMapping = 0;
885 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
886 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
887#ifdef VBOX_WITH_RAW_MODE_NOT_R0
888 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
889 && pCtx->cs.u64Base == 0
890 && pCtx->cs.u32Limit == UINT32_MAX
891 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
892 if (!pVCpu->iem.s.fInPatchCode)
893 CPUMRawLeave(pVCpu, VINF_SUCCESS);
894#endif
895
896#ifdef IEM_VERIFICATION_MODE_FULL
897 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
898 pVCpu->iem.s.fNoRem = true;
899#endif
900}
901
902
903/**
904 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
905 *
906 * @param pVCpu The cross context virtual CPU structure of the
907 * calling thread.
908 */
909DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
910{
911 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
912#ifdef IEM_VERIFICATION_MODE_FULL
913 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
914#endif
915#ifdef VBOX_STRICT
916# ifdef IEM_WITH_CODE_TLB
917# else
918 pVCpu->iem.s.cbOpcode = 0;
919# endif
920#else
921 NOREF(pVCpu);
922#endif
923}
924
925
926/**
927 * Initializes the decoder state.
928 *
929 * iemReInitDecoder is mostly a copy of this function.
930 *
931 * @param pVCpu The cross context virtual CPU structure of the
932 * calling thread.
933 * @param fBypassHandlers Whether to bypass access handlers.
934 */
935DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
936{
937 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
938
939 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
940
941#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
950#endif
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
954#endif
955 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
956#ifdef IEM_VERIFICATION_MODE_FULL
957 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
958 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
959#endif
960 IEMMODE enmMode = iemCalcCpuMode(pCtx);
961 pVCpu->iem.s.enmCpuMode = enmMode;
962 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
963 pVCpu->iem.s.enmEffAddrMode = enmMode;
964 if (enmMode != IEMMODE_64BIT)
965 {
966 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
967 pVCpu->iem.s.enmEffOpSize = enmMode;
968 }
969 else
970 {
971 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
972 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
973 }
974 pVCpu->iem.s.fPrefixes = 0;
975 pVCpu->iem.s.uRexReg = 0;
976 pVCpu->iem.s.uRexB = 0;
977 pVCpu->iem.s.uRexIndex = 0;
978 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
979#ifdef IEM_WITH_CODE_TLB
980 pVCpu->iem.s.pbInstrBuf = NULL;
981 pVCpu->iem.s.offInstrNextByte = 0;
982 pVCpu->iem.s.offCurInstrStart = 0;
983# ifdef VBOX_STRICT
984 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
985 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
986 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
987# endif
988#else
989 pVCpu->iem.s.offOpcode = 0;
990 pVCpu->iem.s.cbOpcode = 0;
991#endif
992 pVCpu->iem.s.cActiveMappings = 0;
993 pVCpu->iem.s.iNextMapping = 0;
994 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
995 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
996#ifdef VBOX_WITH_RAW_MODE_NOT_R0
997 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
998 && pCtx->cs.u64Base == 0
999 && pCtx->cs.u32Limit == UINT32_MAX
1000 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1001 if (!pVCpu->iem.s.fInPatchCode)
1002 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1003#endif
1004
1005#ifdef DBGFTRACE_ENABLED
1006 switch (enmMode)
1007 {
1008 case IEMMODE_64BIT:
1009 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1010 break;
1011 case IEMMODE_32BIT:
1012 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1013 break;
1014 case IEMMODE_16BIT:
1015 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1016 break;
1017 }
1018#endif
1019}
1020
1021
1022/**
1023 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1024 *
1025 * This is mostly a copy of iemInitDecoder.
1026 *
1027 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1028 */
1029DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1030{
1031 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1032
1033 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1034
1035#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1043 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1044#endif
1045
1046 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1047#ifdef IEM_VERIFICATION_MODE_FULL
1048 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1049 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1050#endif
1051 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1052 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1053 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1054 pVCpu->iem.s.enmEffAddrMode = enmMode;
1055 if (enmMode != IEMMODE_64BIT)
1056 {
1057 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1058 pVCpu->iem.s.enmEffOpSize = enmMode;
1059 }
1060 else
1061 {
1062 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1063 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1064 }
1065 pVCpu->iem.s.fPrefixes = 0;
1066 pVCpu->iem.s.uRexReg = 0;
1067 pVCpu->iem.s.uRexB = 0;
1068 pVCpu->iem.s.uRexIndex = 0;
1069 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1070#ifdef IEM_WITH_CODE_TLB
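 /* If the new RIP still falls within the previously mapped instruction buffer,
    just reposition the read window (allowing up to 15 bytes ahead for the next
    instruction); otherwise drop the buffer so the next fetch goes through the
    code TLB again. */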
1071 if (pVCpu->iem.s.pbInstrBuf)
1072 {
1073 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1074 - pVCpu->iem.s.uInstrBufPc;
1075 if (off < pVCpu->iem.s.cbInstrBufTotal)
1076 {
1077 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1078 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1079 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1080 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1081 else
1082 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1083 }
1084 else
1085 {
1086 pVCpu->iem.s.pbInstrBuf = NULL;
1087 pVCpu->iem.s.offInstrNextByte = 0;
1088 pVCpu->iem.s.offCurInstrStart = 0;
1089 }
1090 }
1091 else
1092 {
1093 pVCpu->iem.s.offInstrNextByte = 0;
1094 pVCpu->iem.s.offCurInstrStart = 0;
1095 }
1096#else
1097 pVCpu->iem.s.cbOpcode = 0;
1098 pVCpu->iem.s.offOpcode = 0;
1099#endif
1100 Assert(pVCpu->iem.s.cActiveMappings == 0);
1101 pVCpu->iem.s.iNextMapping = 0;
1102 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1103 Assert(pVCpu->iem.s.fBypassHandlers == false);
1104#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1105 if (!pVCpu->iem.s.fInPatchCode)
1106 { /* likely */ }
1107 else
1108 {
1109 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1110 && pCtx->cs.u64Base == 0
1111 && pCtx->cs.u32Limit == UINT32_MAX
1112 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1113 if (!pVCpu->iem.s.fInPatchCode)
1114 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1115 }
1116#endif
1117
1118#ifdef DBGFTRACE_ENABLED
1119 switch (enmMode)
1120 {
1121 case IEMMODE_64BIT:
1122 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1123 break;
1124 case IEMMODE_32BIT:
1125 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1126 break;
1127 case IEMMODE_16BIT:
1128 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1129 break;
1130 }
1131#endif
1132}
1133
1134
1135
1136/**
1137 * Prefetches opcodes the first time, when starting execution.
1138 *
1139 * @returns Strict VBox status code.
1140 * @param pVCpu The cross context virtual CPU structure of the
1141 * calling thread.
1142 * @param fBypassHandlers Whether to bypass access handlers.
1143 */
1144IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1145{
1146#ifdef IEM_VERIFICATION_MODE_FULL
1147 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1148#endif
1149 iemInitDecoder(pVCpu, fBypassHandlers);
1150
1151#ifdef IEM_WITH_CODE_TLB
1152 /** @todo Do ITLB lookup here. */
1153
1154#else /* !IEM_WITH_CODE_TLB */
1155
1156 /*
1157 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1158 *
1159 * First translate CS:rIP to a physical address.
1160 */
1161 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1162 uint32_t cbToTryRead;
1163 RTGCPTR GCPtrPC;
1164 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1165 {
1166 cbToTryRead = PAGE_SIZE;
1167 GCPtrPC = pCtx->rip;
1168 if (!IEM_IS_CANONICAL(GCPtrPC))
1169 return iemRaiseGeneralProtectionFault0(pVCpu);
1170 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1171 }
1172 else
1173 {
1174 uint32_t GCPtrPC32 = pCtx->eip;
1175 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1176 if (GCPtrPC32 > pCtx->cs.u32Limit)
1177 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1178 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1179 if (!cbToTryRead) /* overflowed */
1180 {
1181 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1182 cbToTryRead = UINT32_MAX;
1183 }
1184 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1185 Assert(GCPtrPC <= UINT32_MAX);
1186 }
1187
1188# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1189 /* Allow interpretation of patch manager code blocks since they can for
1190 instance throw #PFs for perfectly good reasons. */
1191 if (pVCpu->iem.s.fInPatchCode)
1192 {
1193 size_t cbRead = 0;
1194 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1195 AssertRCReturn(rc, rc);
1196 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1197 return VINF_SUCCESS;
1198 }
1199# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1200
1201 RTGCPHYS GCPhys;
1202 uint64_t fFlags;
1203 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1204 if (RT_FAILURE(rc))
1205 {
1206 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1207 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1210 {
1211 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1212 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1213 }
1214 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1215 {
1216 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1217 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1218 }
1219 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1220 /** @todo Check reserved bits and such stuff. PGM is better at doing
1221 * that, so do it when implementing the guest virtual address
1222 * TLB... */
1223
1224# ifdef IEM_VERIFICATION_MODE_FULL
1225 /*
1226 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1227 * instruction.
1228 */
1229 /** @todo optimize this differently by not using PGMPhysRead. */
1230 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1231 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1232 if ( offPrevOpcodes < cbOldOpcodes
1233 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1234 {
1235 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1236 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1237 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1238 pVCpu->iem.s.cbOpcode = cbNew;
1239 return VINF_SUCCESS;
1240 }
1241# endif
1242
1243 /*
1244 * Read the bytes at this address.
1245 */
1246 PVM pVM = pVCpu->CTX_SUFF(pVM);
1247# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1248 size_t cbActual;
1249 if ( PATMIsEnabled(pVM)
1250 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1251 {
1252 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1253 Assert(cbActual > 0);
1254 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1255 }
1256 else
1257# endif
1258 {
1259 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1260 if (cbToTryRead > cbLeftOnPage)
1261 cbToTryRead = cbLeftOnPage;
1262 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1263 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1264
1265 if (!pVCpu->iem.s.fBypassHandlers)
1266 {
1267 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1268 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1269 { /* likely */ }
1270 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1271 {
1272 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1273 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1274 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1275 }
1276 else
1277 {
1278 Log((RT_SUCCESS(rcStrict)
1279 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1280 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1281 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1282 return rcStrict;
1283 }
1284 }
1285 else
1286 {
1287 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1288 if (RT_SUCCESS(rc))
1289 { /* likely */ }
1290 else
1291 {
1292 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1293 GCPtrPC, GCPhys, cbToTryRead, rc));
1294 return rc;
1295 }
1296 }
1297 pVCpu->iem.s.cbOpcode = cbToTryRead;
1298 }
1299#endif /* !IEM_WITH_CODE_TLB */
1300 return VINF_SUCCESS;
1301}
1302
1303
1304/**
1305 * Invalidates the IEM TLBs.
1306 *
1307 * This is called internally as well as by PGM when moving GC mappings.
1308 *
1309 * @returns
1310 * @param pVCpu The cross context virtual CPU structure of the calling
1311 * thread.
1312 * @param fVmm Set when PGM calls us with a remapping.
1313 */
1314void IEMInvalidTLBs(PVMCPU pVCpu, bool fVmm)
1315{
1316#ifdef IEM_WITH_CODE_TLB
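 /* Bumping the revision invalidates all entries cheaply: tags are stored with the
    revision ORed in, so no existing tag can match a freshly computed one after the
    bump. Only when the revision wraps around to zero (the unlikely branch below)
    do the entries need to be cleared explicitly. */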
1317 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1318 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1319 { /* very likely */ }
1320 else
1321 {
1322 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1323 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1324 while (i-- > 0)
1325 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1326 }
1327#endif
1328
1329#ifdef IEM_WITH_DATA_TLB
1330 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1331 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1332 { /* very likely */ }
1333 else
1334 {
1335 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1336 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1337 while (i-- > 0)
1338 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1339 }
1340#endif
1341 NOREF(pVCpu); NOREF(fVmm);
1342}
1343
1344
1345/**
1346 * Invalidates the host physical aspects of the IEM TLBs.
1347 *
1348 * This is called internally as well as by PGM when moving GC mappings.
1349 *
1350 * @returns
1351 * @param pVCpu The cross context virtual CPU structure of the calling
1352 * thread.
1353 */
1354void IEMInvalidTLBsHostPhys(PVMCPU pVCpu, uint64_t uTlbPhysRev, bool fFullFlush)
1355{
1356#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1357 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1358
1359 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1360 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1361
1362 if (!fFullFlush)
1363 { /* very likely */ }
1364 else
1365 {
1366 unsigned i;
1367# ifdef IEM_WITH_CODE_TLB
1368 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1369 while (i-- > 0)
1370 {
1371 pVCpu->iem.s.CodeTlb.aEntries[i].pMappingR3 = NULL;
1372 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1373 }
1374# endif
1375# ifdef IEM_WITH_DATA_TLB
1376 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1377 while (i-- > 0)
1378 {
1379 pVCpu->iem.s.DataTlb.aEntries[i].pMappingR3 = NULL;
1380 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1381 }
1382# endif
1383 }
1384#endif
1385 NOREF(pVCpu); NOREF(fFullFlush);
1386}
1387
1388
1389#ifdef IEM_WITH_CODE_TLB
1390
1391/**
1392 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1393 * failure and jumping.
1394 *
1395 * We end up here for a number of reasons:
1396 * - pbInstrBuf isn't yet initialized.
1397 * - Advancing beyond the buffer boundary (e.g. cross page).
1398 * - Advancing beyond the CS segment limit.
1399 * - Fetching from non-mappable page (e.g. MMIO).
1400 *
1401 * @param pVCpu The cross context virtual CPU structure of the
1402 * calling thread.
1403 * @param pvDst Where to return the bytes.
1404 * @param cbDst Number of bytes to read.
1405 *
1406 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1407 */
1408IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1409{
1410 Assert(cbDst <= 8);
1411 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1412
1413 /*
1414 * We might have a partial buffer match, deal with that first to make the
1415 * rest simpler. This is the first part of the cross page/buffer case.
1416 */
1417 if (pVCpu->iem.s.pbInstrBuf != NULL)
1418 {
1419 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1420 {
1421 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1422 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1423 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1424
1425 cbDst -= cbCopy;
1426 pvDst = (uint8_t *)pvDst + cbCopy;
1427 offBuf += cbCopy;
1428 pVCpu->iem.s.offInstrNextByte += cbCopy;
1429 }
1430 }
1431
1432 /*
1433 * Check segment limit, figuring how much we're allowed to access at this point.
1434 */
1435 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1436 RTGCPTR GCPtrFirst;
1437 uint32_t cbMaxRead;
1438 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1439 {
1440 GCPtrFirst = pCtx->rip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1441 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1442 { /* likely */ }
1443 else
1444 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1445 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1446 }
1447 else
1448 {
1449 GCPtrFirst = pCtx->eip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1450 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1451 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1452 { /* likely */ }
1453 else
1454 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1455 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1456 if (cbMaxRead != 0)
1457 { /* likely */ }
1458 else
1459 {
1460 /* Overflowed because address is 0 and limit is max. */
1461 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1462 cbMaxRead = X86_PAGE_SIZE;
1463 }
1464 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1465 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1466 if (cbMaxRead2 < cbMaxRead)
1467 cbMaxRead = cbMaxRead2;
1468 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1469 }
1470
1471 /*
1472 * Get the TLB entry for this piece of code.
1473 */
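 /* uTag combines the linear page number with the current TLB revision (bumped by
    IEMInvalidTLBs to invalidate entries), and its low byte indexes the 256-entry,
    direct-mapped table. */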
1474 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1475 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1476 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1477 if (pTlbe->uTag == uTag)
1478 {
1479 /* likely when executing lots of code, otherwise unlikely */
1480# ifdef VBOX_WITH_STATISTICS
1481 pVCpu->iem.s.CodeTlb.cTlbHits++;
1482# endif
1483 }
1484 else
1485 {
1486 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1487 pVCpu->iem.s.CodeTlb.cTlbMissesTag++;
1488# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1489 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1490 {
1491 pTlbe->uTag = uTag;
1492 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1493 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1494 pTlbe->GCPhys = NIL_RTGCPHYS;
1495 pTlbe->pMappingR3 = NULL;
1496 }
1497 else
1498# endif
1499 {
1500 RTGCPHYS GCPhys;
1501 uint64_t fFlags;
1502 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1503 if (RT_FAILURE(rc))
1504 {
1505 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1506 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1507 }
1508
1509 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1510 pTlbe->uTag = uTag;
1511 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1512 pTlbe->GCPhys = GCPhys;
1513 pTlbe->pMappingR3 = NULL;
1514 }
1515 }
1516
1517 /*
1518 * Check TLB access flags.
1519 */
1520 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1521 {
1522 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1523 {
1524 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1525 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1526 }
1527 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1528 {
1529 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1530 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1531 }
1532 }
1533
1534# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1535 /*
1536 * Allow interpretation of patch manager code blocks since they can for
1537 * instance throw #PFs for perfectly good reasons.
1538 */
1539 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1540 { /* not unlikely */ }
1541 else
1542 {
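/** @todo Patch-code fetching is not implemented in this TLB path yet; the
 *        disabled block below shows the intended PATMReadPatchCode handling. */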
1543
1544 }
1545
1546# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1547
1548# if 0
1549
1550# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1551 /* Allow interpretation of patch manager code blocks since they can for
1552 instance throw #PFs for perfectly good reasons. */
1553 if (pVCpu->iem.s.fInPatchCode)
1554 {
1555 size_t cbRead = 0;
1556 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1557 AssertRCReturn(rc, rc);
1558 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1559 return VINF_SUCCESS;
1560 }
1561# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1562
1563 RTGCPHYS GCPhys;
1564 uint64_t fFlags;
1565 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1566 if (RT_FAILURE(rc))
1567 {
1568 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1569 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1570 }
1571 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1572 {
1573 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1574 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1575 }
1576 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1577 {
1578 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1579 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1580 }
1581 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1582 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1583 /** @todo Check reserved bits and such stuff. PGM is better at doing
1584 * that, so do it when implementing the guest virtual address
1585 * TLB... */
1586
1587 /*
1588 * Read the bytes at this address.
1589 *
1590 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1591 * and since PATM should only patch the start of an instruction there
1592 * should be no need to check again here.
1593 */
1594 if (!pVCpu->iem.s.fBypassHandlers)
1595 {
1596 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1597 cbToTryRead, PGMACCESSORIGIN_IEM);
1598 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1599 { /* likely */ }
1600 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1601 {
1602 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1603 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1604 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1605 }
1606 else
1607 {
1608 Log((RT_SUCCESS(rcStrict)
1609 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1610 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1611 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1612 return rcStrict;
1613 }
1614 }
1615 else
1616 {
1617 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1618 if (RT_SUCCESS(rc))
1619 { /* likely */ }
1620 else
1621 {
1622 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1623 return rc;
1624 }
1625 }
1626 pVCpu->iem.s.cbOpcode += cbToTryRead;
1627 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1628# endif
1629}
1630
1631#else
1632
1633/**
1634 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1635 * exception if it fails.
1636 *
1637 * @returns Strict VBox status code.
1638 * @param pVCpu The cross context virtual CPU structure of the
1639 * calling thread.
1640 * @param cbMin The minimum number of bytes relative to offOpcode
1641 * that must be read.
1642 */
1643IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1644{
1645 /*
1646 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1647 *
1648 * First translate CS:rIP to a physical address.
1649 */
1650 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1651 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1652 uint32_t cbToTryRead;
1653 RTGCPTR GCPtrNext;
1654 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1655 {
1656 cbToTryRead = PAGE_SIZE;
1657 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1658 if (!IEM_IS_CANONICAL(GCPtrNext))
1659 return iemRaiseGeneralProtectionFault0(pVCpu);
1660 }
1661 else
1662 {
1663 uint32_t GCPtrNext32 = pCtx->eip;
1664 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1665 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1666 if (GCPtrNext32 > pCtx->cs.u32Limit)
1667 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1668 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1669 if (!cbToTryRead) /* overflowed */
1670 {
1671 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1672 cbToTryRead = UINT32_MAX;
1673 /** @todo check out wrapping around the code segment. */
1674 }
1675 if (cbToTryRead < cbMin - cbLeft)
1676 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1677 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1678 }
1679
1680 /* Only read up to the end of the page, and make sure we don't read more
1681 than the opcode buffer can hold. */
1682 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1683 if (cbToTryRead > cbLeftOnPage)
1684 cbToTryRead = cbLeftOnPage;
1685 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1686 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1687/** @todo r=bird: Convert assertion into undefined opcode exception? */
1688 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1689
1690# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1691 /* Allow interpretation of patch manager code blocks since they can for
1692 instance throw #PFs for perfectly good reasons. */
1693 if (pVCpu->iem.s.fInPatchCode)
1694 {
1695 size_t cbRead = 0;
1696 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1697 AssertRCReturn(rc, rc);
1698 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1699 return VINF_SUCCESS;
1700 }
1701# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1702
1703 RTGCPHYS GCPhys;
1704 uint64_t fFlags;
1705 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1706 if (RT_FAILURE(rc))
1707 {
1708 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1709 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1710 }
1711 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1712 {
1713 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1714 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1715 }
1716 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1717 {
1718 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1719 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1720 }
1721 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1722 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1723 /** @todo Check reserved bits and such stuff. PGM is better at doing
1724 * that, so do it when implementing the guest virtual address
1725 * TLB... */
1726
1727 /*
1728 * Read the bytes at this address.
1729 *
1730 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1731 * and since PATM should only patch the start of an instruction there
1732 * should be no need to check again here.
1733 */
1734 if (!pVCpu->iem.s.fBypassHandlers)
1735 {
1736 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1737 cbToTryRead, PGMACCESSORIGIN_IEM);
1738 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1739 { /* likely */ }
1740 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1741 {
1742 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1743 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1744 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1745 }
1746 else
1747 {
1748 Log((RT_SUCCESS(rcStrict)
1749 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1750 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1751 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1752 return rcStrict;
1753 }
1754 }
1755 else
1756 {
1757 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1758 if (RT_SUCCESS(rc))
1759 { /* likely */ }
1760 else
1761 {
1762 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1763 return rc;
1764 }
1765 }
1766 pVCpu->iem.s.cbOpcode += cbToTryRead;
1767 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1768
1769 return VINF_SUCCESS;
1770}
1771
1772#endif /* !IEM_WITH_CODE_TLB */
1773#ifndef IEM_WITH_SETJMP
1774
1775/**
1776 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1777 *
1778 * @returns Strict VBox status code.
1779 * @param pVCpu The cross context virtual CPU structure of the
1780 * calling thread.
1781 * @param pb Where to return the opcode byte.
1782 */
1783DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1784{
1785 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1786 if (rcStrict == VINF_SUCCESS)
1787 {
1788 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1789 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1790 pVCpu->iem.s.offOpcode = offOpcode + 1;
1791 }
1792 else
1793 *pb = 0;
1794 return rcStrict;
1795}
1796
1797
1798/**
1799 * Fetches the next opcode byte.
1800 *
1801 * @returns Strict VBox status code.
1802 * @param pVCpu The cross context virtual CPU structure of the
1803 * calling thread.
1804 * @param pu8 Where to return the opcode byte.
1805 */
1806DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1807{
1808 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1809 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1810 {
1811 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1812 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1813 return VINF_SUCCESS;
1814 }
1815 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1816}
1817
1818#else /* IEM_WITH_SETJMP */
1819
1820/**
1821 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1822 *
1823 * @returns The opcode byte.
1824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1825 */
1826DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1827{
1828# ifdef IEM_WITH_CODE_TLB
1829 uint8_t u8;
1830 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1831 return u8;
1832# else
1833 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1834 if (rcStrict == VINF_SUCCESS)
1835 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1836 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1837# endif
1838}
1839
1840
1841/**
1842 * Fetches the next opcode byte, longjmp on error.
1843 *
1844 * @returns The opcode byte.
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 */
1847DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1848{
1849# ifdef IEM_WITH_CODE_TLB
1850 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1851 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1852 if (RT_LIKELY( pbBuf != NULL
1853 && offBuf < pVCpu->iem.s.cbInstrBuf))
1854 {
1855 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1856 return pbBuf[offBuf];
1857 }
1858# else
1859 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1860 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1861 {
1862 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1863 return pVCpu->iem.s.abOpcode[offOpcode];
1864 }
1865# endif
1866 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1867}
1868
1869#endif /* IEM_WITH_SETJMP */
1870
1871/**
1872 * Fetches the next opcode byte, returns automatically on failure.
1873 *
1874 * @param a_pu8 Where to return the opcode byte.
1875 * @remark Implicitly references pVCpu.
1876 */
1877#ifndef IEM_WITH_SETJMP
1878# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1879 do \
1880 { \
1881 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
1882 if (rcStrict2 == VINF_SUCCESS) \
1883 { /* likely */ } \
1884 else \
1885 return rcStrict2; \
1886 } while (0)
1887#else
1888# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
1889#endif /* IEM_WITH_SETJMP */
1890
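/*
 * Usage sketch (editorial illustration, not part of the original source): a
 * hypothetical decoder fragment showing how IEM_OPCODE_GET_NEXT_U8 is meant to
 * be used.  In the status-code build the macro returns from the caller on
 * failure; in the setjmp build it longjmps, so no explicit check is needed.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemOp_ExampleImm8(PVMCPU pVCpu)
{
    uint8_t bImm; /* immediate byte following the opcode */
    IEM_OPCODE_GET_NEXT_U8(&bImm); /* returns/longjmps if the fetch fails */
    Log4(("iemOp_ExampleImm8: imm8=%#x\n", bImm));
    return VINF_SUCCESS;
}
#endif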
1891
1892#ifndef IEM_WITH_SETJMP
1893/**
1894 * Fetches the next signed byte from the opcode stream.
1895 *
1896 * @returns Strict VBox status code.
1897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1898 * @param pi8 Where to return the signed byte.
1899 */
1900DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
1901{
1902 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
1903}
1904#endif /* !IEM_WITH_SETJMP */
1905
1906
1907/**
1908 * Fetches the next signed byte from the opcode stream, returning automatically
1909 * on failure.
1910 *
1911 * @param a_pi8 Where to return the signed byte.
1912 * @remark Implicitly references pVCpu.
1913 */
1914#ifndef IEM_WITH_SETJMP
1915# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1916 do \
1917 { \
1918 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
1919 if (rcStrict2 != VINF_SUCCESS) \
1920 return rcStrict2; \
1921 } while (0)
1922#else /* IEM_WITH_SETJMP */
1923# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1924
1925#endif /* IEM_WITH_SETJMP */
1926
1927#ifndef IEM_WITH_SETJMP
1928
1929/**
1930 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1931 *
1932 * @returns Strict VBox status code.
1933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1934 * @param pu16 Where to return the opcode word.
1935 */
1936DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
1937{
1938 uint8_t u8;
1939 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1940 if (rcStrict == VINF_SUCCESS)
1941 *pu16 = (int8_t)u8;
1942 return rcStrict;
1943}
1944
1945
1946/**
1947 * Fetches the next signed byte from the opcode stream, extending it to
1948 * unsigned 16-bit.
1949 *
1950 * @returns Strict VBox status code.
1951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1952 * @param pu16 Where to return the unsigned word.
1953 */
1954DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
1955{
1956 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1957 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
1958 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
1959
1960 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
1961 pVCpu->iem.s.offOpcode = offOpcode + 1;
1962 return VINF_SUCCESS;
1963}
1964
1965#endif /* !IEM_WITH_SETJMP */
1966
1967/**
1968 * Fetches the next signed byte from the opcode stream, sign-extending it to
1969 * a word, returning automatically on failure.
1970 *
1971 * @param a_pu16 Where to return the word.
1972 * @remark Implicitly references pVCpu.
1973 */
1974#ifndef IEM_WITH_SETJMP
1975# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1976 do \
1977 { \
1978 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
1979 if (rcStrict2 != VINF_SUCCESS) \
1980 return rcStrict2; \
1981 } while (0)
1982#else
1983# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1984#endif
1985
1986#ifndef IEM_WITH_SETJMP
1987
1988/**
1989 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1990 *
1991 * @returns Strict VBox status code.
1992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1993 * @param pu32 Where to return the opcode dword.
1994 */
1995DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
1996{
1997 uint8_t u8;
1998 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1999 if (rcStrict == VINF_SUCCESS)
2000 *pu32 = (int8_t)u8;
2001 return rcStrict;
2002}
2003
2004
2005/**
2006 * Fetches the next signed byte from the opcode stream, extending it to
2007 * unsigned 32-bit.
2008 *
2009 * @returns Strict VBox status code.
2010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2011 * @param pu32 Where to return the unsigned dword.
2012 */
2013DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2014{
2015 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2016 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2017 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2018
2019 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2020 pVCpu->iem.s.offOpcode = offOpcode + 1;
2021 return VINF_SUCCESS;
2022}
2023
2024#endif /* !IEM_WITH_SETJMP */
2025
2026/**
2027 * Fetches the next signed byte from the opcode stream, sign-extending it to
2028 * a double word, returning automatically on failure.
2029 *
2030 * @param a_pu32 Where to return the double word.
2031 * @remark Implicitly references pVCpu.
2032 */
2033#ifndef IEM_WITH_SETJMP
2034# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2035 do \
2036 { \
2037 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2038 if (rcStrict2 != VINF_SUCCESS) \
2039 return rcStrict2; \
2040 } while (0)
2041#else
2042# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2043#endif
2044
2045#ifndef IEM_WITH_SETJMP
2046
2047/**
2048 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2049 *
2050 * @returns Strict VBox status code.
2051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2052 * @param pu64 Where to return the opcode qword.
2053 */
2054DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2055{
2056 uint8_t u8;
2057 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2058 if (rcStrict == VINF_SUCCESS)
2059 *pu64 = (int8_t)u8;
2060 return rcStrict;
2061}
2062
2063
2064/**
2065 * Fetches the next signed byte from the opcode stream, extending it to
2066 * unsigned 64-bit.
2067 *
2068 * @returns Strict VBox status code.
2069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2070 * @param pu64 Where to return the unsigned qword.
2071 */
2072DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2073{
2074 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2075 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2076 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2077
2078 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2079 pVCpu->iem.s.offOpcode = offOpcode + 1;
2080 return VINF_SUCCESS;
2081}
2082
2083#endif /* !IEM_WITH_SETJMP */
2084
2085
2086/**
2087 * Fetches the next signed byte from the opcode stream, sign-extending it to
2088 * a quad word, returning automatically on failure.
2089 *
2090 * @param a_pu64 Where to return the quad word.
2091 * @remark Implicitly references pVCpu.
2092 */
2093#ifndef IEM_WITH_SETJMP
2094# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2095 do \
2096 { \
2097 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2098 if (rcStrict2 != VINF_SUCCESS) \
2099 return rcStrict2; \
2100 } while (0)
2101#else
2102# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2103#endif
2104
2105
2106#ifndef IEM_WITH_SETJMP
2107
2108/**
2109 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2110 *
2111 * @returns Strict VBox status code.
2112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2113 * @param pu16 Where to return the opcode word.
2114 */
2115DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2116{
2117 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2118 if (rcStrict == VINF_SUCCESS)
2119 {
2120 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2121# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2122 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2123# else
2124 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2125# endif
2126 pVCpu->iem.s.offOpcode = offOpcode + 2;
2127 }
2128 else
2129 *pu16 = 0;
2130 return rcStrict;
2131}
2132
2133
2134/**
2135 * Fetches the next opcode word.
2136 *
2137 * @returns Strict VBox status code.
2138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2139 * @param pu16 Where to return the opcode word.
2140 */
2141DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2142{
2143 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2144 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2145 {
2146 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2147# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2148 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2149# else
2150 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2151# endif
2152 return VINF_SUCCESS;
2153 }
2154 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2155}
2156
2157#else /* IEM_WITH_SETJMP */
2158
2159/**
2160 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2161 *
2162 * @returns The opcode word.
2163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2164 */
2165DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2166{
2167# ifdef IEM_WITH_CODE_TLB
2168 uint16_t u16;
2169 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2170 return u16;
2171# else
2172 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2173 if (rcStrict == VINF_SUCCESS)
2174 {
2175 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2176 pVCpu->iem.s.offOpcode += 2;
2177# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2178 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2179# else
2180 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2181# endif
2182 }
2183 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2184# endif
2185}
2186
2187
2188/**
2189 * Fetches the next opcode word, longjmp on error.
2190 *
2191 * @returns The opcode word.
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 */
2194DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2195{
2196# ifdef IEM_WITH_CODE_TLB
2197 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2198 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2199 if (RT_LIKELY( pbBuf != NULL
2200 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2201 {
2202 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2203# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2204 return *(uint16_t const *)&pbBuf[offBuf];
2205# else
2206 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2207# endif
2208 }
2209# else
2210 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2211 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2212 {
2213 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2214# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2215 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2216# else
2217 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2218# endif
2219 }
2220# endif
2221 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2222}
2223
2224#endif /* IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next opcode word, returns automatically on failure.
2229 *
2230 * @param a_pu16 Where to return the opcode word.
2231 * @remark Implicitly references pVCpu.
2232 */
2233#ifndef IEM_WITH_SETJMP
2234# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2235 do \
2236 { \
2237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2238 if (rcStrict2 != VINF_SUCCESS) \
2239 return rcStrict2; \
2240 } while (0)
2241#else
2242# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2243#endif
2244
2245#ifndef IEM_WITH_SETJMP
2246
2247/**
2248 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2249 *
2250 * @returns Strict VBox status code.
2251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2252 * @param pu32 Where to return the opcode double word.
2253 */
2254DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2255{
2256 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2257 if (rcStrict == VINF_SUCCESS)
2258 {
2259 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2260 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2261 pVCpu->iem.s.offOpcode = offOpcode + 2;
2262 }
2263 else
2264 *pu32 = 0;
2265 return rcStrict;
2266}
2267
2268
2269/**
2270 * Fetches the next opcode word, zero extending it to a double word.
2271 *
2272 * @returns Strict VBox status code.
2273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2274 * @param pu32 Where to return the opcode double word.
2275 */
2276DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2277{
2278 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2279 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2280 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2281
2282 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2283 pVCpu->iem.s.offOpcode = offOpcode + 2;
2284 return VINF_SUCCESS;
2285}
2286
2287#endif /* !IEM_WITH_SETJMP */
2288
2289
2290/**
2291 * Fetches the next opcode word and zero extends it to a double word, returns
2292 * automatically on failure.
2293 *
2294 * @param a_pu32 Where to return the opcode double word.
2295 * @remark Implicitly references pVCpu.
2296 */
2297#ifndef IEM_WITH_SETJMP
2298# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2299 do \
2300 { \
2301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2302 if (rcStrict2 != VINF_SUCCESS) \
2303 return rcStrict2; \
2304 } while (0)
2305#else
2306# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2307#endif
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu64 Where to return the opcode quad word.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2319{
2320 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2321 if (rcStrict == VINF_SUCCESS)
2322 {
2323 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2324 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2325 pVCpu->iem.s.offOpcode = offOpcode + 2;
2326 }
2327 else
2328 *pu64 = 0;
2329 return rcStrict;
2330}
2331
2332
2333/**
2334 * Fetches the next opcode word, zero extending it to a quad word.
2335 *
2336 * @returns Strict VBox status code.
2337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2338 * @param pu64 Where to return the opcode quad word.
2339 */
2340DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2341{
2342 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2343 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2344 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2345
2346 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2347 pVCpu->iem.s.offOpcode = offOpcode + 2;
2348 return VINF_SUCCESS;
2349}
2350
2351#endif /* !IEM_WITH_SETJMP */
2352
2353/**
2354 * Fetches the next opcode word and zero extends it to a quad word, returns
2355 * automatically on failure.
2356 *
2357 * @param a_pu64 Where to return the opcode quad word.
2358 * @remark Implicitly references pVCpu.
2359 */
2360#ifndef IEM_WITH_SETJMP
2361# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2362 do \
2363 { \
2364 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2365 if (rcStrict2 != VINF_SUCCESS) \
2366 return rcStrict2; \
2367 } while (0)
2368#else
2369# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2370#endif
2371
2372
2373#ifndef IEM_WITH_SETJMP
2374/**
2375 * Fetches the next signed word from the opcode stream.
2376 *
2377 * @returns Strict VBox status code.
2378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2379 * @param pi16 Where to return the signed word.
2380 */
2381DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2382{
2383 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2384}
2385#endif /* !IEM_WITH_SETJMP */
2386
2387
2388/**
2389 * Fetches the next signed word from the opcode stream, returning automatically
2390 * on failure.
2391 *
2392 * @param a_pi16 Where to return the signed word.
2393 * @remark Implicitly references pVCpu.
2394 */
2395#ifndef IEM_WITH_SETJMP
2396# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2397 do \
2398 { \
2399 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2400 if (rcStrict2 != VINF_SUCCESS) \
2401 return rcStrict2; \
2402 } while (0)
2403#else
2404# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2405#endif
2406
2407#ifndef IEM_WITH_SETJMP
2408
2409/**
2410 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2411 *
2412 * @returns Strict VBox status code.
2413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2414 * @param pu32 Where to return the opcode dword.
2415 */
2416DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2417{
2418 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2419 if (rcStrict == VINF_SUCCESS)
2420 {
2421 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2422# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2423 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2424# else
2425 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2426 pVCpu->iem.s.abOpcode[offOpcode + 1],
2427 pVCpu->iem.s.abOpcode[offOpcode + 2],
2428 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2429# endif
2430 pVCpu->iem.s.offOpcode = offOpcode + 4;
2431 }
2432 else
2433 *pu32 = 0;
2434 return rcStrict;
2435}
2436
2437
2438/**
2439 * Fetches the next opcode dword.
2440 *
2441 * @returns Strict VBox status code.
2442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2443 * @param pu32 Where to return the opcode double word.
2444 */
2445DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2446{
2447 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2448 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2449 {
2450 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2452 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2453# else
2454 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2455 pVCpu->iem.s.abOpcode[offOpcode + 1],
2456 pVCpu->iem.s.abOpcode[offOpcode + 2],
2457 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2458# endif
2459 return VINF_SUCCESS;
2460 }
2461 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2462}
2463
2464#else /* IEM_WITH_SETJMP */
2465
2466/**
2467 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2468 *
2469 * @returns The opcode dword.
2470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2471 */
2472DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2473{
2474# ifdef IEM_WITH_CODE_TLB
2475 uint32_t u32;
2476 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2477 return u32;
2478# else
2479 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2480 if (rcStrict == VINF_SUCCESS)
2481 {
2482 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offOpcode = offOpcode + 4;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2488 pVCpu->iem.s.abOpcode[offOpcode + 1],
2489 pVCpu->iem.s.abOpcode[offOpcode + 2],
2490 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2491# endif
2492 }
2493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2494# endif
2495}
2496
2497
2498/**
2499 * Fetches the next opcode dword, longjmp on error.
2500 *
2501 * @returns The opcode dword.
2502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2503 */
2504DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2505{
2506# ifdef IEM_WITH_CODE_TLB
2507 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2508 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2509 if (RT_LIKELY( pbBuf != NULL
2510 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2511 {
2512 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2514 return *(uint32_t const *)&pbBuf[offBuf];
2515# else
2516 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2517 pbBuf[offBuf + 1],
2518 pbBuf[offBuf + 2],
2519 pbBuf[offBuf + 3]);
2520# endif
2521 }
2522# else
2523 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2524 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2525 {
2526 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2527# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2528 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2529# else
2530 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2531 pVCpu->iem.s.abOpcode[offOpcode + 1],
2532 pVCpu->iem.s.abOpcode[offOpcode + 2],
2533 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2534# endif
2535 }
2536# endif
2537 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2538}
2539
2540#endif /* IEM_WITH_SETJMP */
2541
2542
2543/**
2544 * Fetches the next opcode dword, returns automatically on failure.
2545 *
2546 * @param a_pu32 Where to return the opcode dword.
2547 * @remark Implicitly references pVCpu.
2548 */
2549#ifndef IEM_WITH_SETJMP
2550# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2551 do \
2552 { \
2553 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2554 if (rcStrict2 != VINF_SUCCESS) \
2555 return rcStrict2; \
2556 } while (0)
2557#else
2558# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2559#endif
2560
2561#ifndef IEM_WITH_SETJMP
2562
2563/**
2564 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2565 *
2566 * @returns Strict VBox status code.
2567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2568 * @param pu64 Where to return the opcode quad word.
2569 */
2570DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2571{
2572 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2573 if (rcStrict == VINF_SUCCESS)
2574 {
2575 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2576 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2577 pVCpu->iem.s.abOpcode[offOpcode + 1],
2578 pVCpu->iem.s.abOpcode[offOpcode + 2],
2579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2580 pVCpu->iem.s.offOpcode = offOpcode + 4;
2581 }
2582 else
2583 *pu64 = 0;
2584 return rcStrict;
2585}
2586
2587
2588/**
2589 * Fetches the next opcode dword, zero extending it to a quad word.
2590 *
2591 * @returns Strict VBox status code.
2592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2593 * @param pu64 Where to return the opcode quad word.
2594 */
2595DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2596{
2597 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2598 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2599 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2600
2601 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2602 pVCpu->iem.s.abOpcode[offOpcode + 1],
2603 pVCpu->iem.s.abOpcode[offOpcode + 2],
2604 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2605 pVCpu->iem.s.offOpcode = offOpcode + 4;
2606 return VINF_SUCCESS;
2607}
2608
2609#endif /* !IEM_WITH_SETJMP */
2610
2611
2612/**
2613 * Fetches the next opcode dword and zero extends it to a quad word, returns
2614 * automatically on failure.
2615 *
2616 * @param a_pu64 Where to return the opcode quad word.
2617 * @remark Implicitly references pVCpu.
2618 */
2619#ifndef IEM_WITH_SETJMP
2620# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2621 do \
2622 { \
2623 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2624 if (rcStrict2 != VINF_SUCCESS) \
2625 return rcStrict2; \
2626 } while (0)
2627#else
2628# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2629#endif
2630
2631
2632#ifndef IEM_WITH_SETJMP
2633/**
2634 * Fetches the next signed double word from the opcode stream.
2635 *
2636 * @returns Strict VBox status code.
2637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2638 * @param pi32 Where to return the signed double word.
2639 */
2640DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2641{
2642 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2643}
2644#endif
2645
2646/**
2647 * Fetches the next signed double word from the opcode stream, returning
2648 * automatically on failure.
2649 *
2650 * @param a_pi32 Where to return the signed double word.
2651 * @remark Implicitly references pVCpu.
2652 */
2653#ifndef IEM_WITH_SETJMP
2654# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2655 do \
2656 { \
2657 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2658 if (rcStrict2 != VINF_SUCCESS) \
2659 return rcStrict2; \
2660 } while (0)
2661#else
2662# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2663#endif
2664
2665#ifndef IEM_WITH_SETJMP
2666
2667/**
2668 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2669 *
2670 * @returns Strict VBox status code.
2671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2672 * @param pu64 Where to return the opcode qword.
2673 */
2674DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2675{
2676 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2677 if (rcStrict == VINF_SUCCESS)
2678 {
2679 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2680 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2681 pVCpu->iem.s.abOpcode[offOpcode + 1],
2682 pVCpu->iem.s.abOpcode[offOpcode + 2],
2683 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2684 pVCpu->iem.s.offOpcode = offOpcode + 4;
2685 }
2686 else
2687 *pu64 = 0;
2688 return rcStrict;
2689}
2690
2691
2692/**
2693 * Fetches the next opcode dword, sign extending it into a quad word.
2694 *
2695 * @returns Strict VBox status code.
2696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2697 * @param pu64 Where to return the opcode quad word.
2698 */
2699DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2700{
2701 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2702 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2703 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2704
2705 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2706 pVCpu->iem.s.abOpcode[offOpcode + 1],
2707 pVCpu->iem.s.abOpcode[offOpcode + 2],
2708 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2709 *pu64 = i32;
2710 pVCpu->iem.s.offOpcode = offOpcode + 4;
2711 return VINF_SUCCESS;
2712}
2713
2714#endif /* !IEM_WITH_SETJMP */
2715
2716
2717/**
2718 * Fetches the next opcode double word and sign extends it to a quad word,
2719 * returns automatically on failure.
2720 *
2721 * @param a_pu64 Where to return the opcode quad word.
2722 * @remark Implicitly references pVCpu.
2723 */
2724#ifndef IEM_WITH_SETJMP
2725# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2726 do \
2727 { \
2728 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2729 if (rcStrict2 != VINF_SUCCESS) \
2730 return rcStrict2; \
2731 } while (0)
2732#else
2733# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2734#endif
2735
2736#ifndef IEM_WITH_SETJMP
2737
2738/**
2739 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2740 *
2741 * @returns Strict VBox status code.
2742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2743 * @param pu64 Where to return the opcode qword.
2744 */
2745DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2746{
2747 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2748 if (rcStrict == VINF_SUCCESS)
2749 {
2750 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2751# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2752 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2753# else
2754 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2755 pVCpu->iem.s.abOpcode[offOpcode + 1],
2756 pVCpu->iem.s.abOpcode[offOpcode + 2],
2757 pVCpu->iem.s.abOpcode[offOpcode + 3],
2758 pVCpu->iem.s.abOpcode[offOpcode + 4],
2759 pVCpu->iem.s.abOpcode[offOpcode + 5],
2760 pVCpu->iem.s.abOpcode[offOpcode + 6],
2761 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2762# endif
2763 pVCpu->iem.s.offOpcode = offOpcode + 8;
2764 }
2765 else
2766 *pu64 = 0;
2767 return rcStrict;
2768}
2769
2770
2771/**
2772 * Fetches the next opcode qword.
2773 *
2774 * @returns Strict VBox status code.
2775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2776 * @param pu64 Where to return the opcode qword.
2777 */
2778DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2779{
2780 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2781 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2782 {
2783# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2784 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2785# else
2786 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2787 pVCpu->iem.s.abOpcode[offOpcode + 1],
2788 pVCpu->iem.s.abOpcode[offOpcode + 2],
2789 pVCpu->iem.s.abOpcode[offOpcode + 3],
2790 pVCpu->iem.s.abOpcode[offOpcode + 4],
2791 pVCpu->iem.s.abOpcode[offOpcode + 5],
2792 pVCpu->iem.s.abOpcode[offOpcode + 6],
2793 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2794# endif
2795 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2796 return VINF_SUCCESS;
2797 }
2798 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2799}
2800
2801#else /* IEM_WITH_SETJMP */
2802
2803/**
2804 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2805 *
2806 * @returns The opcode qword.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 */
2809DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2810{
2811# ifdef IEM_WITH_CODE_TLB
2812 uint64_t u64;
2813 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2814 return u64;
2815# else
2816 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2817 if (rcStrict == VINF_SUCCESS)
2818 {
2819 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2820 pVCpu->iem.s.offOpcode = offOpcode + 8;
2821# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2822 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2823# else
2824 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2825 pVCpu->iem.s.abOpcode[offOpcode + 1],
2826 pVCpu->iem.s.abOpcode[offOpcode + 2],
2827 pVCpu->iem.s.abOpcode[offOpcode + 3],
2828 pVCpu->iem.s.abOpcode[offOpcode + 4],
2829 pVCpu->iem.s.abOpcode[offOpcode + 5],
2830 pVCpu->iem.s.abOpcode[offOpcode + 6],
2831 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2832# endif
2833 }
2834 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2835# endif
2836}
2837
2838
2839/**
2840 * Fetches the next opcode qword, longjmp on error.
2841 *
2842 * @returns The opcode qword.
2843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2844 */
2845DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2846{
2847# ifdef IEM_WITH_CODE_TLB
2848 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2849 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2850 if (RT_LIKELY( pbBuf != NULL
2851 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2852 {
2853 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2854# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2855 return *(uint64_t const *)&pbBuf[offBuf];
2856# else
2857 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2858 pbBuf[offBuf + 1],
2859 pbBuf[offBuf + 2],
2860 pbBuf[offBuf + 3],
2861 pbBuf[offBuf + 4],
2862 pbBuf[offBuf + 5],
2863 pbBuf[offBuf + 6],
2864 pbBuf[offBuf + 7]);
2865# endif
2866 }
2867# else
2868 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2869 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2870 {
2871 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2872# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2873 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2874# else
2875 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2876 pVCpu->iem.s.abOpcode[offOpcode + 1],
2877 pVCpu->iem.s.abOpcode[offOpcode + 2],
2878 pVCpu->iem.s.abOpcode[offOpcode + 3],
2879 pVCpu->iem.s.abOpcode[offOpcode + 4],
2880 pVCpu->iem.s.abOpcode[offOpcode + 5],
2881 pVCpu->iem.s.abOpcode[offOpcode + 6],
2882 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2883# endif
2884 }
2885# endif
2886 return iemOpcodeGetNextU64SlowJmp(pVCpu);
2887}
2888
2889#endif /* IEM_WITH_SETJMP */
2890
2891/**
2892 * Fetches the next opcode quad word, returns automatically on failure.
2893 *
2894 * @param a_pu64 Where to return the opcode quad word.
2895 * @remark Implicitly references pVCpu.
2896 */
2897#ifndef IEM_WITH_SETJMP
2898# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
2899 do \
2900 { \
2901 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
2902 if (rcStrict2 != VINF_SUCCESS) \
2903 return rcStrict2; \
2904 } while (0)
2905#else
2906# define IEM_OPCODE_GET_NEXT_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu))
2907#endif
2908
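/*
 * Usage sketch (editorial illustration, not part of the original source): the
 * sign-extending variants above are typically used for displacement bytes and
 * dwords during effective address calculation, along the lines of the
 * hypothetical fragment below (fDisp8 and u64EffAddr are placeholder names).
 */
#if 0 /* illustration only */
    uint64_t u64Disp;
    if (fDisp8) /* hypothetical: ModR/M indicated an 8-bit displacement */
        IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Disp);
    else        /* otherwise a 32-bit displacement */
        IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);
    u64EffAddr += u64Disp;
#endif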
2909
2910/** @name Misc Worker Functions.
2911 * @{
2912 */
2913
2914
2915/**
2916 * Validates a new SS segment.
2917 *
2918 * @returns VBox strict status code.
2919 * @param pVCpu The cross context virtual CPU structure of the
2920 * calling thread.
2921 * @param pCtx The CPU context.
2922 * @param NewSS The new SS selector.
2923 * @param uCpl The CPL to load the stack for.
2924 * @param pDesc Where to return the descriptor.
2925 */
2926IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2927{
2928 NOREF(pCtx);
2929
2930 /* Null selectors are not allowed (we're not called for dispatching
2931 interrupts with SS=0 in long mode). */
2932 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2933 {
2934 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2935 return iemRaiseTaskSwitchFault0(pVCpu);
2936 }
2937
2938 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2939 if ((NewSS & X86_SEL_RPL) != uCpl)
2940 {
2941 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2942 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2943 }
2944
2945 /*
2946 * Read the descriptor.
2947 */
2948 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2949 if (rcStrict != VINF_SUCCESS)
2950 return rcStrict;
2951
2952 /*
2953 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2954 */
2955 if (!pDesc->Legacy.Gen.u1DescType)
2956 {
2957 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2958 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2959 }
2960
2961 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2962 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2963 {
2964 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2965 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2966 }
2967 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2968 {
2969 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2970 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2971 }
2972
2973 /* Is it there? */
2974 /** @todo testcase: Is this checked before the canonical / limit check below? */
2975 if (!pDesc->Legacy.Gen.u1Present)
2976 {
2977 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2978 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2979 }
2980
2981 return VINF_SUCCESS;
2982}
2983
2984
2985/**
2986 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2987 * not.
2988 *
2989 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2990 * @param a_pCtx The CPU context.
2991 */
2992#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2993# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2994 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
2995 ? (a_pCtx)->eflags.u \
2996 : CPUMRawGetEFlags(a_pVCpu) )
2997#else
2998# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2999 ( (a_pCtx)->eflags.u )
3000#endif
3001
3002/**
3003 * Updates the EFLAGS in the correct manner wrt. PATM.
3004 *
3005 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3006 * @param a_pCtx The CPU context.
3007 * @param a_fEfl The new EFLAGS.
3008 */
3009#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3010# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3011 do { \
3012 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3013 (a_pCtx)->eflags.u = (a_fEfl); \
3014 else \
3015 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3016 } while (0)
3017#else
3018# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3019 do { \
3020 (a_pCtx)->eflags.u = (a_fEfl); \
3021 } while (0)
3022#endif
3023
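/*
 * Usage sketch (editorial illustration, not part of the original source):
 * reading, modifying and writing back EFLAGS through the PATM-aware accessors
 * defined above, as done for instance when delivering a real-mode interrupt.
 */
#if 0 /* illustration only */
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
    fEfl &= ~X86_EFL_IF;                 /* e.g. clear IF when taking an interrupt */
    IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
#endif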
3024
3025/** @} */
3026
3027/** @name Raising Exceptions.
3028 *
3029 * @{
3030 */
3031
3032/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3033 * @{ */
3034/** CPU exception. */
3035#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3036/** External interrupt (from PIC, APIC, whatever). */
3037#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3038/** Software interrupt (int or into, not bound).
3039 * Returns to the following instruction */
3040#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3041/** Takes an error code. */
3042#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3043/** Takes a CR2. */
3044#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3045/** Generated by the breakpoint instruction. */
3046#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3047/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3048#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3049/** @} */
3050
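/*
 * Editorial illustration (not part of the original source): the flags above
 * are combined when raising an event; a page fault, for instance, would
 * plausibly be delivered as a CPU exception carrying both an error code and a
 * CR2 value.  The call below is a sketch only; uErr and GCPtrFault are
 * placeholder names.
 */
#if 0 /* illustration only */
    return iemRaiseXcptOrInt(pVCpu, 0 /*cbInstr*/, X86_XCPT_PF,
                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
                             uErr, GCPtrFault);
#endif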
3051
3052/**
3053 * Loads the specified stack far pointer from the TSS.
3054 *
3055 * @returns VBox strict status code.
3056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3057 * @param pCtx The CPU context.
3058 * @param uCpl The CPL to load the stack for.
3059 * @param pSelSS Where to return the new stack segment.
3060 * @param puEsp Where to return the new stack pointer.
3061 */
3062IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3063 PRTSEL pSelSS, uint32_t *puEsp)
3064{
3065 VBOXSTRICTRC rcStrict;
3066 Assert(uCpl < 4);
3067
3068 switch (pCtx->tr.Attr.n.u4Type)
3069 {
3070 /*
3071 * 16-bit TSS (X86TSS16).
3072 */
3073 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
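/* deliberate fall through - the available TSS type is handled like the busy one */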
3074 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3075 {
3076 uint32_t off = uCpl * 4 + 2;
3077 if (off + 4 <= pCtx->tr.u32Limit)
3078 {
3079 /** @todo check actual access pattern here. */
3080 uint32_t u32Tmp = 0; /* gcc maybe... */
3081 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3082 if (rcStrict == VINF_SUCCESS)
3083 {
3084 *puEsp = RT_LOWORD(u32Tmp);
3085 *pSelSS = RT_HIWORD(u32Tmp);
3086 return VINF_SUCCESS;
3087 }
3088 }
3089 else
3090 {
3091 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3092 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3093 }
3094 break;
3095 }
3096
3097 /*
3098 * 32-bit TSS (X86TSS32).
3099 */
3100 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
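/* deliberate fall through - the available TSS type is handled like the busy one */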
3101 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3102 {
3103 uint32_t off = uCpl * 8 + 4;
3104 if (off + 7 <= pCtx->tr.u32Limit)
3105 {
3106/** @todo check actual access pattern here. */
3107 uint64_t u64Tmp;
3108 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3109 if (rcStrict == VINF_SUCCESS)
3110 {
3111 *puEsp = u64Tmp & UINT32_MAX;
3112 *pSelSS = (RTSEL)(u64Tmp >> 32);
3113 return VINF_SUCCESS;
3114 }
3115 }
3116 else
3117 {
3118 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3119 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3120 }
3121 break;
3122 }
3123
3124 default:
3125 AssertFailed();
3126 rcStrict = VERR_IEM_IPE_4;
3127 break;
3128 }
3129
3130 *puEsp = 0; /* make gcc happy */
3131 *pSelSS = 0; /* make gcc happy */
3132 return rcStrict;
3133}
3134
3135
3136/**
3137 * Loads the specified stack pointer from the 64-bit TSS.
3138 *
3139 * @returns VBox strict status code.
3140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3141 * @param pCtx The CPU context.
3142 * @param uCpl The CPL to load the stack for.
3143 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3144 * @param puRsp Where to return the new stack pointer.
3145 */
3146IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3147{
3148 Assert(uCpl < 4);
3149 Assert(uIst < 8);
3150 *puRsp = 0; /* make gcc happy */
3151
3152 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3153
3154 uint32_t off;
3155 if (uIst)
3156 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3157 else
3158 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3159 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3160 {
3161 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3162 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3163 }
3164
3165 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3166}
3167
3168
3169/**
3170 * Adjust the CPU state according to the exception being raised.
3171 *
3172 * @param pCtx The CPU context.
3173 * @param u8Vector The exception that has been raised.
3174 */
3175DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3176{
3177 switch (u8Vector)
3178 {
3179 case X86_XCPT_DB:
3180 pCtx->dr[7] &= ~X86_DR7_GD;
3181 break;
3182 /** @todo Read the AMD and Intel exception reference... */
3183 }
3184}
3185
3186
3187/**
3188 * Implements exceptions and interrupts for real mode.
3189 *
3190 * @returns VBox strict status code.
3191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3192 * @param pCtx The CPU context.
3193 * @param cbInstr The number of bytes to offset rIP by in the return
3194 * address.
3195 * @param u8Vector The interrupt / exception vector number.
3196 * @param fFlags The flags.
3197 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3198 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3199 */
3200IEM_STATIC VBOXSTRICTRC
3201iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3202 PCPUMCTX pCtx,
3203 uint8_t cbInstr,
3204 uint8_t u8Vector,
3205 uint32_t fFlags,
3206 uint16_t uErr,
3207 uint64_t uCr2)
3208{
3209 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3210 NOREF(uErr); NOREF(uCr2);
3211
3212 /*
3213 * Read the IDT entry.
3214 */
3215 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3216 {
3217 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3218 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3219 }
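/* A real mode IDT (IVT) entry is just 4 bytes - the 16-bit offset followed by the 16-bit
   segment - so a single 32-bit fetch into the RTFAR16 (off, sel) pair below does the job. */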
3220 RTFAR16 Idte;
3221 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3222 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3223 return rcStrict;
3224
3225 /*
3226 * Push the stack frame.
3227 */
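/* Three words are pushed, so the frame reads IP, CS, FLAGS from the lowest address up,
   mirroring what a real mode INT would push. */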
3228 uint16_t *pu16Frame;
3229 uint64_t uNewRsp;
3230 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3231 if (rcStrict != VINF_SUCCESS)
3232 return rcStrict;
3233
3234 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3235#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3236 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3237 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3238 fEfl |= UINT16_C(0xf000); /* FLAGS bits 15:12 read as ones on the 8086/80186. */
3239#endif
3240 pu16Frame[2] = (uint16_t)fEfl;
3241 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3242 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3243 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3244 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3245 return rcStrict;
3246
3247 /*
3248 * Load the vector address into cs:ip and make exception specific state
3249 * adjustments.
3250 */
3251 pCtx->cs.Sel = Idte.sel;
3252 pCtx->cs.ValidSel = Idte.sel;
3253 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3254 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3255 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3256 pCtx->rip = Idte.off;
3257 fEfl &= ~X86_EFL_IF;
3258 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3259
3260 /** @todo do we actually do this in real mode? */
3261 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3262 iemRaiseXcptAdjustState(pCtx, u8Vector);
3263
3264 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3265}
3266
3267
3268/**
3269 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3270 *
3271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3272 * @param pSReg Pointer to the segment register.
3273 */
3274IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3275{
3276 pSReg->Sel = 0;
3277 pSReg->ValidSel = 0;
3278 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3279 {
3280 /* VT-x (Intel 3960x) doesn't change the base and limit; it keeps only the attributes below and sets the unusable bit */
3281 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3282 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3283 }
3284 else
3285 {
3286 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3287 /** @todo check this on AMD-V */
3288 pSReg->u64Base = 0;
3289 pSReg->u32Limit = 0;
3290 }
3291}
3292
3293
3294/**
3295 * Loads a segment selector during a task switch in V8086 mode.
3296 *
3297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3298 * @param pSReg Pointer to the segment register.
3299 * @param uSel The selector value to load.
3300 */
3301IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3302{
3303 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3304 pSReg->Sel = uSel;
3305 pSReg->ValidSel = uSel;
3306 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3307 pSReg->u64Base = uSel << 4;
3308 pSReg->u32Limit = 0xffff;
3309 pSReg->Attr.u = 0xf3; /* Present, DPL=3, read/write accessed data segment - the fixed V86-mode attributes. */
3310}
3311
3312
3313/**
3314 * Loads a NULL data selector into a selector register, both the hidden and
3315 * visible parts, in protected mode.
3316 *
3317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3318 * @param pSReg Pointer to the segment register.
3319 * @param uRpl The RPL.
3320 */
3321IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3322{
3323 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3324 * data selector in protected mode. */
3325 pSReg->Sel = uRpl;
3326 pSReg->ValidSel = uRpl;
3327 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3328 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3329 {
3330 /* VT-x (Intel 3960x) was observed doing something like this. */
3331 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3332 pSReg->u32Limit = UINT32_MAX;
3333 pSReg->u64Base = 0;
3334 }
3335 else
3336 {
3337 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3338 pSReg->u32Limit = 0;
3339 pSReg->u64Base = 0;
3340 }
3341}
3342
3343
3344/**
3345 * Loads a segment selector during a task switch in protected mode.
3346 *
3347 * In this task switch scenario, we would throw \#TS exceptions rather than
3348 * \#GPs.
3349 *
3350 * @returns VBox strict status code.
3351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3352 * @param pSReg Pointer to the segment register.
3353 * @param uSel The new selector value.
3354 *
3355 * @remarks This does _not_ handle CS or SS.
3356 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3357 */
3358IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3359{
3360 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3361
3362 /* Null data selector. */
3363 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3364 {
3365 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3366 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3367 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3368 return VINF_SUCCESS;
3369 }
3370
3371 /* Fetch the descriptor. */
3372 IEMSELDESC Desc;
3373 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3374 if (rcStrict != VINF_SUCCESS)
3375 {
3376 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3377 VBOXSTRICTRC_VAL(rcStrict)));
3378 return rcStrict;
3379 }
3380
3381 /* Must be a data segment or readable code segment. */
3382 if ( !Desc.Legacy.Gen.u1DescType
3383 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3384 {
3385 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3386 Desc.Legacy.Gen.u4Type));
3387 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3388 }
3389
3390 /* Check privileges for data segments and non-conforming code segments. */
3391 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3392 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3393 {
3394 /* The RPL and the new CPL must be less than or equal to the DPL. */
3395 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3396 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3397 {
3398 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3399 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3400 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3401 }
3402 }
3403
3404 /* Is it there? */
3405 if (!Desc.Legacy.Gen.u1Present)
3406 {
3407 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3408 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3409 }
3410
3411 /* The base and limit. */
3412 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3413 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3414
3415 /*
3416 * Ok, everything checked out fine. Now set the accessed bit before
3417 * committing the result into the registers.
3418 */
3419 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3420 {
3421 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3422 if (rcStrict != VINF_SUCCESS)
3423 return rcStrict;
3424 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3425 }
3426
3427 /* Commit */
3428 pSReg->Sel = uSel;
3429 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3430 pSReg->u32Limit = cbLimit;
3431 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3432 pSReg->ValidSel = uSel;
3433 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3434 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3435 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3436
3437 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3438 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3439 return VINF_SUCCESS;
3440}
3441
3442
3443/**
3444 * Performs a task switch.
3445 *
3446 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3447 * caller is responsible for performing the necessary checks (like DPL, TSS
3448 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3449 * reference for JMP, CALL, IRET.
3450 *
3451 * If the task switch is due to a software interrupt or hardware exception,
3452 * the caller is responsible for validating the TSS selector and descriptor. See
3453 * Intel Instruction reference for INT n.
3454 *
3455 * @returns VBox strict status code.
3456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3457 * @param pCtx The CPU context.
3458 * @param enmTaskSwitch What caused this task switch.
3459 * @param uNextEip The EIP effective after the task switch.
3460 * @param fFlags The flags.
3461 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3462 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3463 * @param SelTSS The TSS selector of the new task.
3464 * @param pNewDescTSS Pointer to the new TSS descriptor.
3465 */
3466IEM_STATIC VBOXSTRICTRC
3467iemTaskSwitch(PVMCPU pVCpu,
3468 PCPUMCTX pCtx,
3469 IEMTASKSWITCH enmTaskSwitch,
3470 uint32_t uNextEip,
3471 uint32_t fFlags,
3472 uint16_t uErr,
3473 uint64_t uCr2,
3474 RTSEL SelTSS,
3475 PIEMSELDESC pNewDescTSS)
3476{
3477 Assert(!IEM_IS_REAL_MODE(pVCpu));
3478 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3479
3480 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3481 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3482 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3483 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3484 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3485
3486 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3487 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3488
3489 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3490 fIsNewTSS386, pCtx->eip, uNextEip));
3491
3492 /* Update CR2 in case it's a page-fault. */
3493 /** @todo This should probably be done much earlier in IEM/PGM. See
3494 * @bugref{5653#c49}. */
3495 if (fFlags & IEM_XCPT_FLAGS_CR2)
3496 pCtx->cr2 = uCr2;
3497
3498 /*
3499 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3500 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3501 */
3502 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3503 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3504 if (uNewTSSLimit < uNewTSSLimitMin)
3505 {
3506 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3507 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3508 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3509 }
3510
3511 /*
3512 * Check the current TSS limit. The last written byte to the current TSS during the
3513 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3514 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3515 *
3516 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3517 * end up with smaller than "legal" TSS limits.
3518 */
3519 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3520 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3521 if (uCurTSSLimit < uCurTSSLimitMin)
3522 {
3523 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3524 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3525 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3526 }
3527
3528 /*
3529 * Verify that the new TSS can be accessed and map it. Map only the required contents
3530 * and not the entire TSS.
3531 */
3532 void *pvNewTSS;
3533 uint32_t cbNewTSS = uNewTSSLimitMin + 1; /* 0x68 bytes for a 32-bit TSS, 0x2c for a 16-bit one. */
3534 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3535 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3536 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3537 * not perform correct translation if this happens. See Intel spec. 7.2.1
3538 * "Task-State Segment" */
3539 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3540 if (rcStrict != VINF_SUCCESS)
3541 {
3542 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3543 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3544 return rcStrict;
3545 }
3546
3547 /*
3548 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3549 */
3550 uint32_t u32EFlags = pCtx->eflags.u32;
3551 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3552 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3553 {
3554 PX86DESC pDescCurTSS;
3555 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3556 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3557 if (rcStrict != VINF_SUCCESS)
3558 {
3559 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3560 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3561 return rcStrict;
3562 }
3563
3564 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3565 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3566 if (rcStrict != VINF_SUCCESS)
3567 {
3568 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3569 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3570 return rcStrict;
3571 }
3572
3573 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3574 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3575 {
3576 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3577 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3578 u32EFlags &= ~X86_EFL_NT;
3579 }
3580 }
3581
3582 /*
3583 * Save the CPU state into the current TSS.
3584 */
3585 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3586 if (GCPtrNewTSS == GCPtrCurTSS)
3587 {
3588 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3589 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3590 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3591 }
3592 if (fIsNewTSS386)
3593 {
3594 /*
3595 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3596 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3597 */
3598 void *pvCurTSS32;
3599 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3600 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3601 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3602 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3603 if (rcStrict != VINF_SUCCESS)
3604 {
3605 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3606 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3607 return rcStrict;
3608 }
3609
3610 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
3611 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3612 pCurTSS32->eip = uNextEip;
3613 pCurTSS32->eflags = u32EFlags;
3614 pCurTSS32->eax = pCtx->eax;
3615 pCurTSS32->ecx = pCtx->ecx;
3616 pCurTSS32->edx = pCtx->edx;
3617 pCurTSS32->ebx = pCtx->ebx;
3618 pCurTSS32->esp = pCtx->esp;
3619 pCurTSS32->ebp = pCtx->ebp;
3620 pCurTSS32->esi = pCtx->esi;
3621 pCurTSS32->edi = pCtx->edi;
3622 pCurTSS32->es = pCtx->es.Sel;
3623 pCurTSS32->cs = pCtx->cs.Sel;
3624 pCurTSS32->ss = pCtx->ss.Sel;
3625 pCurTSS32->ds = pCtx->ds.Sel;
3626 pCurTSS32->fs = pCtx->fs.Sel;
3627 pCurTSS32->gs = pCtx->gs.Sel;
3628
3629 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3630 if (rcStrict != VINF_SUCCESS)
3631 {
3632 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3633 VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636 }
3637 else
3638 {
3639 /*
3640 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3641 */
3642 void *pvCurTSS16;
3643 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3644 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3645 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3646 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3647 if (rcStrict != VINF_SUCCESS)
3648 {
3649 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3650 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3651 return rcStrict;
3652 }
3653
3654 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
3655 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3656 pCurTSS16->ip = uNextEip;
3657 pCurTSS16->flags = u32EFlags;
3658 pCurTSS16->ax = pCtx->ax;
3659 pCurTSS16->cx = pCtx->cx;
3660 pCurTSS16->dx = pCtx->dx;
3661 pCurTSS16->bx = pCtx->bx;
3662 pCurTSS16->sp = pCtx->sp;
3663 pCurTSS16->bp = pCtx->bp;
3664 pCurTSS16->si = pCtx->si;
3665 pCurTSS16->di = pCtx->di;
3666 pCurTSS16->es = pCtx->es.Sel;
3667 pCurTSS16->cs = pCtx->cs.Sel;
3668 pCurTSS16->ss = pCtx->ss.Sel;
3669 pCurTSS16->ds = pCtx->ds.Sel;
3670
3671 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3672 if (rcStrict != VINF_SUCCESS)
3673 {
3674 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3675 VBOXSTRICTRC_VAL(rcStrict)));
3676 return rcStrict;
3677 }
3678 }
3679
3680 /*
3681 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3682 */
3683 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3684 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3685 {
3686 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3687 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3688 pNewTSS->selPrev = pCtx->tr.Sel;
3689 }
3690
3691 /*
3692 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3693 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3694 */
3695 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3696 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3697 bool fNewDebugTrap;
3698 if (fIsNewTSS386)
3699 {
3700 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3701 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3702 uNewEip = pNewTSS32->eip;
3703 uNewEflags = pNewTSS32->eflags;
3704 uNewEax = pNewTSS32->eax;
3705 uNewEcx = pNewTSS32->ecx;
3706 uNewEdx = pNewTSS32->edx;
3707 uNewEbx = pNewTSS32->ebx;
3708 uNewEsp = pNewTSS32->esp;
3709 uNewEbp = pNewTSS32->ebp;
3710 uNewEsi = pNewTSS32->esi;
3711 uNewEdi = pNewTSS32->edi;
3712 uNewES = pNewTSS32->es;
3713 uNewCS = pNewTSS32->cs;
3714 uNewSS = pNewTSS32->ss;
3715 uNewDS = pNewTSS32->ds;
3716 uNewFS = pNewTSS32->fs;
3717 uNewGS = pNewTSS32->gs;
3718 uNewLdt = pNewTSS32->selLdt;
3719 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3720 }
3721 else
3722 {
3723 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3724 uNewCr3 = 0;
3725 uNewEip = pNewTSS16->ip;
3726 uNewEflags = pNewTSS16->flags;
3727 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3728 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3729 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3730 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3731 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3732 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3733 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3734 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3735 uNewES = pNewTSS16->es;
3736 uNewCS = pNewTSS16->cs;
3737 uNewSS = pNewTSS16->ss;
3738 uNewDS = pNewTSS16->ds;
3739 uNewFS = 0;
3740 uNewGS = 0;
3741 uNewLdt = pNewTSS16->selLdt;
3742 fNewDebugTrap = false;
3743 }
3744
3745 if (GCPtrNewTSS == GCPtrCurTSS)
3746 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3747 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3748
3749 /*
3750 * We're done accessing the new TSS.
3751 */
3752 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3753 if (rcStrict != VINF_SUCCESS)
3754 {
3755 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3756 return rcStrict;
3757 }
3758
3759 /*
3760 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3761 */
3762 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3763 {
3764 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3765 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3766 if (rcStrict != VINF_SUCCESS)
3767 {
3768 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3769 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3770 return rcStrict;
3771 }
3772
3773 /* Check that the descriptor indicates the new TSS is available (not busy). */
3774 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3775 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3776 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3777
3778 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3779 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3780 if (rcStrict != VINF_SUCCESS)
3781 {
3782 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3783 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3784 return rcStrict;
3785 }
3786 }
3787
3788 /*
3789 * From this point on, we're technically in the new task. Exceptions raised from here on are
3790 * delivered after the task switch completes but before any instruction of the new task executes.
3791 */
3792 pCtx->tr.Sel = SelTSS;
3793 pCtx->tr.ValidSel = SelTSS;
3794 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3795 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3796 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3797 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3798 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3799
3800 /* Set the busy bit in TR. */
3801 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3802 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3803 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3804 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3805 {
3806 uNewEflags |= X86_EFL_NT;
3807 }
3808
3809 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3810 pCtx->cr0 |= X86_CR0_TS;
3811 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3812
3813 pCtx->eip = uNewEip;
3814 pCtx->eax = uNewEax;
3815 pCtx->ecx = uNewEcx;
3816 pCtx->edx = uNewEdx;
3817 pCtx->ebx = uNewEbx;
3818 pCtx->esp = uNewEsp;
3819 pCtx->ebp = uNewEbp;
3820 pCtx->esi = uNewEsi;
3821 pCtx->edi = uNewEdi;
3822
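/* Strip the reserved EFLAGS bits and force the fixed always-one bit (bit 1). */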
3823 uNewEflags &= X86_EFL_LIVE_MASK;
3824 uNewEflags |= X86_EFL_RA1_MASK;
3825 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3826
3827 /*
3828 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3829 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3830 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3831 */
3832 pCtx->es.Sel = uNewES;
3833 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3834
3835 pCtx->cs.Sel = uNewCS;
3836 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3837
3838 pCtx->ss.Sel = uNewSS;
3839 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3840
3841 pCtx->ds.Sel = uNewDS;
3842 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3843
3844 pCtx->fs.Sel = uNewFS;
3845 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3846
3847 pCtx->gs.Sel = uNewGS;
3848 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3849 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3850
3851 pCtx->ldtr.Sel = uNewLdt;
3852 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3853 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3854 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3855
3856 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3857 {
3858 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3859 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3860 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3861 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3862 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3863 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3864 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3865 }
3866
3867 /*
3868 * Switch CR3 for the new task.
3869 */
3870 if ( fIsNewTSS386
3871 && (pCtx->cr0 & X86_CR0_PG))
3872 {
3873 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3874 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3875 {
3876 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3877 AssertRCSuccessReturn(rc, rc);
3878 }
3879 else
3880 pCtx->cr3 = uNewCr3;
3881
3882 /* Inform PGM. */
3883 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3884 {
3885 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3886 AssertRCReturn(rc, rc);
3887 /* ignore informational status codes */
3888 }
3889 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3890 }
3891
3892 /*
3893 * Switch LDTR for the new task.
3894 */
3895 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3896 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
3897 else
3898 {
3899 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3900
3901 IEMSELDESC DescNewLdt;
3902 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3903 if (rcStrict != VINF_SUCCESS)
3904 {
3905 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3906 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3907 return rcStrict;
3908 }
3909 if ( !DescNewLdt.Legacy.Gen.u1Present
3910 || DescNewLdt.Legacy.Gen.u1DescType
3911 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3912 {
3913 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3914 uNewLdt, DescNewLdt.Legacy.u));
3915 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3916 }
3917
3918 pCtx->ldtr.ValidSel = uNewLdt;
3919 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3920 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3921 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3922 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3923 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3924 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3925 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
3926 }
3927
3928 IEMSELDESC DescSS;
3929 if (IEM_IS_V86_MODE(pVCpu))
3930 {
3931 pVCpu->iem.s.uCpl = 3;
3932 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES);
3933 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS);
3934 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS);
3935 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS);
3936 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS);
3937 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS);
3938 }
3939 else
3940 {
3941 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3942
3943 /*
3944 * Load the stack segment for the new task.
3945 */
3946 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3947 {
3948 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3949 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3950 }
3951
3952 /* Fetch the descriptor. */
3953 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3954 if (rcStrict != VINF_SUCCESS)
3955 {
3956 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3957 VBOXSTRICTRC_VAL(rcStrict)));
3958 return rcStrict;
3959 }
3960
3961 /* SS must be a data segment and writable. */
3962 if ( !DescSS.Legacy.Gen.u1DescType
3963 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3964 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3965 {
3966 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3967 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3968 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3969 }
3970
3971 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3972 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3973 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3974 {
3975 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3976 uNewCpl));
3977 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3978 }
3979
3980 /* Is it there? */
3981 if (!DescSS.Legacy.Gen.u1Present)
3982 {
3983 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3984 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3985 }
3986
3987 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3988 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3989
3990 /* Set the accessed bit before committing the result into SS. */
3991 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3992 {
3993 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3994 if (rcStrict != VINF_SUCCESS)
3995 return rcStrict;
3996 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3997 }
3998
3999 /* Commit SS. */
4000 pCtx->ss.Sel = uNewSS;
4001 pCtx->ss.ValidSel = uNewSS;
4002 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4003 pCtx->ss.u32Limit = cbLimit;
4004 pCtx->ss.u64Base = u64Base;
4005 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4006 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4007
4008 /* CPL has changed, update IEM before loading rest of segments. */
4009 pVCpu->iem.s.uCpl = uNewCpl;
4010
4011 /*
4012 * Load the data segments for the new task.
4013 */
4014 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4015 if (rcStrict != VINF_SUCCESS)
4016 return rcStrict;
4017 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4018 if (rcStrict != VINF_SUCCESS)
4019 return rcStrict;
4020 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4021 if (rcStrict != VINF_SUCCESS)
4022 return rcStrict;
4023 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4024 if (rcStrict != VINF_SUCCESS)
4025 return rcStrict;
4026
4027 /*
4028 * Load the code segment for the new task.
4029 */
4030 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4031 {
4032 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4033 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4034 }
4035
4036 /* Fetch the descriptor. */
4037 IEMSELDESC DescCS;
4038 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4039 if (rcStrict != VINF_SUCCESS)
4040 {
4041 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4042 return rcStrict;
4043 }
4044
4045 /* CS must be a code segment. */
4046 if ( !DescCS.Legacy.Gen.u1DescType
4047 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4048 {
4049 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4050 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4051 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4052 }
4053
4054 /* For conforming CS, DPL must be less than or equal to the RPL. */
4055 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4056 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4057 {
4058 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4059 DescCS.Legacy.Gen.u2Dpl));
4060 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4061 }
4062
4063 /* For non-conforming CS, DPL must match RPL. */
4064 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4065 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4066 {
4067 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4068 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4069 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4070 }
4071
4072 /* Is it there? */
4073 if (!DescCS.Legacy.Gen.u1Present)
4074 {
4075 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4076 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4077 }
4078
4079 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4080 u64Base = X86DESC_BASE(&DescCS.Legacy);
4081
4082 /* Set the accessed bit before committing the result into CS. */
4083 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4084 {
4085 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4086 if (rcStrict != VINF_SUCCESS)
4087 return rcStrict;
4088 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4089 }
4090
4091 /* Commit CS. */
4092 pCtx->cs.Sel = uNewCS;
4093 pCtx->cs.ValidSel = uNewCS;
4094 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4095 pCtx->cs.u32Limit = cbLimit;
4096 pCtx->cs.u64Base = u64Base;
4097 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4098 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4099 }
4100
4101 /** @todo Debug trap. */
4102 if (fIsNewTSS386 && fNewDebugTrap)
4103 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4104
4105 /*
4106 * Construct the error code masks based on what caused this task switch.
4107 * See Intel Instruction reference for INT.
4108 */
4109 uint16_t uExt;
4110 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4111 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4112 {
4113 uExt = 1;
4114 }
4115 else
4116 uExt = 0;
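/* uExt becomes the EXT bit (bit 0) of any error code pushed below: set when the event is
   external (hardware interrupt or exception), clear for software INT n. */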
4117
4118 /*
4119 * Push any error code on to the new stack.
4120 */
4121 if (fFlags & IEM_XCPT_FLAGS_ERR)
4122 {
4123 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4125 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4126
4127 /* Check that there is sufficient space on the stack. */
4128 /** @todo Factor out segment limit checking for normal/expand down segments
4129 * into a separate function. */
4130 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4131 {
4132 if ( pCtx->esp - 1 > cbLimitSS
4133 || pCtx->esp < cbStackFrame)
4134 {
4135 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4136 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4137 cbStackFrame));
4138 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4139 }
4140 }
4141 else
4142 {
4143 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4144 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4145 {
4146 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4147 cbStackFrame));
4148 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4149 }
4150 }
4151
4152
4153 if (fIsNewTSS386)
4154 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4155 else
4156 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4157 if (rcStrict != VINF_SUCCESS)
4158 {
4159 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
4160 VBOXSTRICTRC_VAL(rcStrict)));
4161 return rcStrict;
4162 }
4163 }
4164
4165 /* Check the new EIP against the new CS limit. */
4166 if (pCtx->eip > pCtx->cs.u32Limit)
4167 {
4168 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4169 pCtx->eip, pCtx->cs.u32Limit));
4170 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4171 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4172 }
4173
4174 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4175 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4176}
4177
4178
4179/**
4180 * Implements exceptions and interrupts for protected mode.
4181 *
4182 * @returns VBox strict status code.
4183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4184 * @param pCtx The CPU context.
4185 * @param cbInstr The number of bytes to offset rIP by in the return
4186 * address.
4187 * @param u8Vector The interrupt / exception vector number.
4188 * @param fFlags The flags.
4189 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4190 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4191 */
4192IEM_STATIC VBOXSTRICTRC
4193iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4194 PCPUMCTX pCtx,
4195 uint8_t cbInstr,
4196 uint8_t u8Vector,
4197 uint32_t fFlags,
4198 uint16_t uErr,
4199 uint64_t uCr2)
4200{
4201 /*
4202 * Read the IDT entry.
4203 */
4204 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4205 {
4206 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4207 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4208 }
4209 X86DESC Idte;
4210 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4211 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4212 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4213 return rcStrict;
4214 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4215 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4216 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4217
4218 /*
4219 * Check the descriptor type, DPL and such.
4220 * ASSUMES this is done in the same order as described for call-gate calls.
4221 */
4222 if (Idte.Gate.u1DescType)
4223 {
4224 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4225 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4226 }
4227 bool fTaskGate = false;
4228 uint8_t f32BitGate = true;
4229 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
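/* Interrupt gates additionally clear IF on delivery; trap gates leave it alone. TF, NT, RF
   and VM are cleared either way. */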
4230 switch (Idte.Gate.u4Type)
4231 {
4232 case X86_SEL_TYPE_SYS_UNDEFINED:
4233 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4234 case X86_SEL_TYPE_SYS_LDT:
4235 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4236 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4237 case X86_SEL_TYPE_SYS_UNDEFINED2:
4238 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4239 case X86_SEL_TYPE_SYS_UNDEFINED3:
4240 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4241 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4242 case X86_SEL_TYPE_SYS_UNDEFINED4:
4243 {
4244 /** @todo check what actually happens when the type is wrong...
4245 * esp. call gates. */
4246 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4247 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4248 }
4249
4250 case X86_SEL_TYPE_SYS_286_INT_GATE:
4251 f32BitGate = false; /* fall thru */
4252 case X86_SEL_TYPE_SYS_386_INT_GATE:
4253 fEflToClear |= X86_EFL_IF;
4254 break;
4255
4256 case X86_SEL_TYPE_SYS_TASK_GATE:
4257 fTaskGate = true;
4258#ifndef IEM_IMPLEMENTS_TASKSWITCH
4259 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4260#endif
4261 break;
4262
4263 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4264 f32BitGate = false; /* fall thru */
4265 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4266 break;
4267
4268 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4269 }
4270
4271 /* Check DPL against CPL if applicable. */
4272 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4273 {
4274 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4275 {
4276 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4277 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4278 }
4279 }
4280
4281 /* Is it there? */
4282 if (!Idte.Gate.u1Present)
4283 {
4284 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4285 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4286 }
4287
4288 /* Is it a task-gate? */
4289 if (fTaskGate)
4290 {
4291 /*
4292 * Construct the error code masks based on what caused this task switch.
4293 * See Intel Instruction reference for INT.
4294 */
4295 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4296 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4297 RTSEL SelTSS = Idte.Gate.u16Sel;
4298
4299 /*
4300 * Fetch the TSS descriptor in the GDT.
4301 */
4302 IEMSELDESC DescTSS;
4303 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4304 if (rcStrict != VINF_SUCCESS)
4305 {
4306 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4307 VBOXSTRICTRC_VAL(rcStrict)));
4308 return rcStrict;
4309 }
4310
4311 /* The TSS descriptor must be a system segment and be available (not busy). */
4312 if ( DescTSS.Legacy.Gen.u1DescType
4313 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4314 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4315 {
4316 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4317 u8Vector, SelTSS, DescTSS.Legacy.au64));
4318 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4319 }
4320
4321 /* The TSS must be present. */
4322 if (!DescTSS.Legacy.Gen.u1Present)
4323 {
4324 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4325 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4326 }
4327
4328 /* Do the actual task switch. */
4329 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4330 }
4331
4332 /* A null CS is bad. */
4333 RTSEL NewCS = Idte.Gate.u16Sel;
4334 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4335 {
4336 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4337 return iemRaiseGeneralProtectionFault0(pVCpu);
4338 }
4339
4340 /* Fetch the descriptor for the new CS. */
4341 IEMSELDESC DescCS;
4342 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4343 if (rcStrict != VINF_SUCCESS)
4344 {
4345 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4346 return rcStrict;
4347 }
4348
4349 /* Must be a code segment. */
4350 if (!DescCS.Legacy.Gen.u1DescType)
4351 {
4352 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4353 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4354 }
4355 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4356 {
4357 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4358 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4359 }
4360
4361 /* Don't allow lowering the privilege level. */
4362 /** @todo Does the lowering of privileges apply to software interrupts
4363 * only? This has bearings on the more-privileged or
4364 * same-privilege stack behavior further down. A testcase would
4365 * be nice. */
4366 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4367 {
4368 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4369 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4370 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4371 }
4372
4373 /* Make sure the selector is present. */
4374 if (!DescCS.Legacy.Gen.u1Present)
4375 {
4376 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4377 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4378 }
4379
4380 /* Check the new EIP against the new CS limit. */
4381 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4382 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4383 ? Idte.Gate.u16OffsetLow
4384 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4385 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4386 if (uNewEip > cbLimitCS)
4387 {
4388 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4389 u8Vector, uNewEip, cbLimitCS, NewCS));
4390 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4391 }
4392
4393 /* Calc the flag image to push. */
4394 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4395 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4396 fEfl &= ~X86_EFL_RF;
4397 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4398 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4399
4400 /* From V8086 mode only go to CPL 0. */
4401 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4402 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4403 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4404 {
4405 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4406 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4407 }
4408
4409 /*
4410 * If the privilege level changes, we need to get a new stack from the TSS.
4411 * This in turns means validating the new SS and ESP...
4412 */
4413 if (uNewCpl != pVCpu->iem.s.uCpl)
4414 {
4415 RTSEL NewSS;
4416 uint32_t uNewEsp;
4417 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4418 if (rcStrict != VINF_SUCCESS)
4419 return rcStrict;
4420
4421 IEMSELDESC DescSS;
4422 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4423 if (rcStrict != VINF_SUCCESS)
4424 return rcStrict;
4425
4426 /* Check that there is sufficient space for the stack frame. */
4427 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
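/* Frame sizes: for a 16-bit gate 10 bytes (IP, CS, FLAGS, SP, SS as words) or 12 with an
   error code, doubled for a 32-bit gate; when coming from V8086 mode GS, FS, DS and ES are
   pushed as well, giving 18 or 20 bytes (again doubled for a 32-bit gate). */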
4428 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4429 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4430 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4431
4432 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4433 {
4434 if ( uNewEsp - 1 > cbLimitSS
4435 || uNewEsp < cbStackFrame)
4436 {
4437 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4438 u8Vector, NewSS, uNewEsp, cbStackFrame));
4439 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4440 }
4441 }
4442 else
4443 {
4444 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4445 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4446 {
4447 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4448 u8Vector, NewSS, uNewEsp, cbStackFrame));
4449 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4450 }
4451 }
4452
4453 /*
4454 * Start making changes.
4455 */
4456
4457 /* Set the new CPL so that stack accesses use it. */
4458 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4459 pVCpu->iem.s.uCpl = uNewCpl;
4460
4461 /* Create the stack frame. */
4462 RTPTRUNION uStackFrame;
4463 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4464 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4465 if (rcStrict != VINF_SUCCESS)
4466 return rcStrict;
4467 void * const pvStackFrame = uStackFrame.pv;
4468 if (f32BitGate)
4469 {
4470 if (fFlags & IEM_XCPT_FLAGS_ERR)
4471 *uStackFrame.pu32++ = uErr;
4472 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4473 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4474 uStackFrame.pu32[2] = fEfl;
4475 uStackFrame.pu32[3] = pCtx->esp;
4476 uStackFrame.pu32[4] = pCtx->ss.Sel;
4477 if (fEfl & X86_EFL_VM)
4478 {
4479 uStackFrame.pu32[1] = pCtx->cs.Sel;
4480 uStackFrame.pu32[5] = pCtx->es.Sel;
4481 uStackFrame.pu32[6] = pCtx->ds.Sel;
4482 uStackFrame.pu32[7] = pCtx->fs.Sel;
4483 uStackFrame.pu32[8] = pCtx->gs.Sel;
4484 }
4485 }
4486 else
4487 {
4488 if (fFlags & IEM_XCPT_FLAGS_ERR)
4489 *uStackFrame.pu16++ = uErr;
4490 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4491 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4492 uStackFrame.pu16[2] = fEfl;
4493 uStackFrame.pu16[3] = pCtx->sp;
4494 uStackFrame.pu16[4] = pCtx->ss.Sel;
4495 if (fEfl & X86_EFL_VM)
4496 {
4497 uStackFrame.pu16[1] = pCtx->cs.Sel;
4498 uStackFrame.pu16[5] = pCtx->es.Sel;
4499 uStackFrame.pu16[6] = pCtx->ds.Sel;
4500 uStackFrame.pu16[7] = pCtx->fs.Sel;
4501 uStackFrame.pu16[8] = pCtx->gs.Sel;
4502 }
4503 }
4504 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4505 if (rcStrict != VINF_SUCCESS)
4506 return rcStrict;
4507
4508 /* Mark the selectors 'accessed' (hope this is the correct time). */
4509 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4510 * after pushing the stack frame? (Write protect the gdt + stack to
4511 * find out.) */
4512 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4513 {
4514 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4515 if (rcStrict != VINF_SUCCESS)
4516 return rcStrict;
4517 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4518 }
4519
4520 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4521 {
4522 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4523 if (rcStrict != VINF_SUCCESS)
4524 return rcStrict;
4525 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4526 }
4527
4528 /*
4529 * Start committing the register changes (joins with the DPL=CPL branch).
4530 */
4531 pCtx->ss.Sel = NewSS;
4532 pCtx->ss.ValidSel = NewSS;
4533 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4534 pCtx->ss.u32Limit = cbLimitSS;
4535 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4536 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4537 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4538 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4539 * SP is loaded).
4540 * Need to check the other combinations too:
4541 * - 16-bit TSS, 32-bit handler
4542 * - 32-bit TSS, 16-bit handler */
4543 if (!pCtx->ss.Attr.n.u1DefBig)
4544 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4545 else
4546 pCtx->rsp = uNewEsp - cbStackFrame;
4547
4548 if (fEfl & X86_EFL_VM)
4549 {
4550 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4551 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4552 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4553 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4554 }
4555 }
4556 /*
4557 * Same privilege, no stack change and smaller stack frame.
4558 */
4559 else
4560 {
4561 uint64_t uNewRsp;
4562 RTPTRUNION uStackFrame;
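/* No stack switch here, so the frame is just IP, CS and FLAGS - 6 bytes as words or 8 with
   an error code, doubled for a 32-bit gate. */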
4563 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4564 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4565 if (rcStrict != VINF_SUCCESS)
4566 return rcStrict;
4567 void * const pvStackFrame = uStackFrame.pv;
4568
4569 if (f32BitGate)
4570 {
4571 if (fFlags & IEM_XCPT_FLAGS_ERR)
4572 *uStackFrame.pu32++ = uErr;
4573 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4574 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4575 uStackFrame.pu32[2] = fEfl;
4576 }
4577 else
4578 {
4579 if (fFlags & IEM_XCPT_FLAGS_ERR)
4580 *uStackFrame.pu16++ = uErr;
4581 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4582 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4583 uStackFrame.pu16[2] = fEfl;
4584 }
4585 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4586 if (rcStrict != VINF_SUCCESS)
4587 return rcStrict;
4588
4589 /* Mark the CS selector as 'accessed'. */
4590 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4591 {
4592 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4593 if (rcStrict != VINF_SUCCESS)
4594 return rcStrict;
4595 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4596 }
4597
4598 /*
4599 * Start committing the register changes (joins with the other branch).
4600 */
4601 pCtx->rsp = uNewRsp;
4602 }
4603
4604 /* ... register committing continues. */
4605 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4606 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4607 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4608 pCtx->cs.u32Limit = cbLimitCS;
4609 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4610 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4611
4612 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4613 fEfl &= ~fEflToClear;
4614 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4615
4616 if (fFlags & IEM_XCPT_FLAGS_CR2)
4617 pCtx->cr2 = uCr2;
4618
4619 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4620 iemRaiseXcptAdjustState(pCtx, u8Vector);
4621
4622 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4623}
4624
4625
4626/**
4627 * Implements exceptions and interrupts for long mode.
4628 *
4629 * @returns VBox strict status code.
4630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4631 * @param pCtx The CPU context.
4632 * @param cbInstr The number of bytes to offset rIP by in the return
4633 * address.
4634 * @param u8Vector The interrupt / exception vector number.
4635 * @param fFlags The flags.
4636 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4637 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4638 */
4639IEM_STATIC VBOXSTRICTRC
4640iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4641 PCPUMCTX pCtx,
4642 uint8_t cbInstr,
4643 uint8_t u8Vector,
4644 uint32_t fFlags,
4645 uint16_t uErr,
4646 uint64_t uCr2)
4647{
4648 /*
4649 * Read the IDT entry.
4650 */
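/* Long mode IDT entries are 16 bytes each, hence the shift by 4 and the two 8-byte fetches
   below. */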
4651 uint16_t offIdt = (uint16_t)u8Vector << 4;
4652 if (pCtx->idtr.cbIdt < offIdt + 7)
4653 {
4654 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4655 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4656 }
4657 X86DESC64 Idte;
4658 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4659 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4660 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4661 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4662 return rcStrict;
4663 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4664 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4665 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4666
4667 /*
4668 * Check the descriptor type, DPL and such.
4669 * ASSUMES this is done in the same order as described for call-gate calls.
4670 */
4671 if (Idte.Gate.u1DescType)
4672 {
4673 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4674 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4675 }
4676 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4677 switch (Idte.Gate.u4Type)
4678 {
4679 case AMD64_SEL_TYPE_SYS_INT_GATE:
4680 fEflToClear |= X86_EFL_IF;
4681 break;
4682 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4683 break;
4684
4685 default:
4686 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4687 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4688 }
4689
4690 /* Check DPL against CPL if applicable. */
4691 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4692 {
4693 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4694 {
4695 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4696 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4697 }
4698 }
4699
4700 /* Is it there? */
4701 if (!Idte.Gate.u1Present)
4702 {
4703 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4704 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4705 }
4706
4707 /* A null CS is bad. */
4708 RTSEL NewCS = Idte.Gate.u16Sel;
4709 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4710 {
4711 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4712 return iemRaiseGeneralProtectionFault0(pVCpu);
4713 }
4714
4715 /* Fetch the descriptor for the new CS. */
4716 IEMSELDESC DescCS;
4717 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4718 if (rcStrict != VINF_SUCCESS)
4719 {
4720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4721 return rcStrict;
4722 }
4723
4724 /* Must be a 64-bit code segment. */
4725 if (!DescCS.Long.Gen.u1DescType)
4726 {
4727 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4728 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4729 }
4730 if ( !DescCS.Long.Gen.u1Long
4731 || DescCS.Long.Gen.u1DefBig
4732 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4733 {
4734 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4735 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4736 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4737 }
4738
4739 /* Don't allow lowering the privilege level. For non-conforming CS
4740 selectors, the CS.DPL sets the privilege level the trap/interrupt
4741 handler runs at. For conforming CS selectors, the CPL remains
4742 unchanged, but the CS.DPL must be <= CPL. */
4743 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4744 * when the CPU is in Ring-0. Result \#GP? */
4745 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4746 {
4747 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4748 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4749 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4750 }
4751
4752
4753 /* Make sure the selector is present. */
4754 if (!DescCS.Legacy.Gen.u1Present)
4755 {
4756 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4757 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4758 }
4759
4760 /* Check that the new RIP is canonical. */
4761 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4762 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4763 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4764 if (!IEM_IS_CANONICAL(uNewRip))
4765 {
4766 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4767 return iemRaiseGeneralProtectionFault0(pVCpu);
4768 }
4769
4770 /*
4771 * If the privilege level changes or if the IST isn't zero, we need to get
4772 * a new stack from the TSS.
4773 */
4774 uint64_t uNewRsp;
4775 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4776 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4777 if ( uNewCpl != pVCpu->iem.s.uCpl
4778 || Idte.Gate.u3IST != 0)
4779 {
4780 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4781 if (rcStrict != VINF_SUCCESS)
4782 return rcStrict;
4783 }
4784 else
4785 uNewRsp = pCtx->rsp;
4786 uNewRsp &= ~(uint64_t)0xf;
4787
4788 /*
4789 * Calc the flag image to push.
4790 */
4791 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4792 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4793 fEfl &= ~X86_EFL_RF;
4794 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4795 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4796
4797 /*
4798 * Start making changes.
4799 */
4800 /* Set the new CPL so that stack accesses use it. */
4801 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4802 pVCpu->iem.s.uCpl = uNewCpl;
4803
4804 /* Create the stack frame. */
4805 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4806 RTPTRUNION uStackFrame;
4807 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4808 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4809 if (rcStrict != VINF_SUCCESS)
4810 return rcStrict;
4811 void * const pvStackFrame = uStackFrame.pv;
4812
4813 if (fFlags & IEM_XCPT_FLAGS_ERR)
4814 *uStackFrame.pu64++ = uErr;
4815 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4816 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4817 uStackFrame.pu64[2] = fEfl;
4818 uStackFrame.pu64[3] = pCtx->rsp;
4819 uStackFrame.pu64[4] = pCtx->ss.Sel;
4820 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4821 if (rcStrict != VINF_SUCCESS)
4822 return rcStrict;
4823
4824 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
4825 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4826 * after pushing the stack frame? (Write protect the gdt + stack to
4827 * find out.) */
4828 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4829 {
4830 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4831 if (rcStrict != VINF_SUCCESS)
4832 return rcStrict;
4833 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4834 }
4835
4836 /*
4837 * Start committing the register changes.
4838 */
4839 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4840 * hidden registers when interrupting 32-bit or 16-bit code! */
4841 if (uNewCpl != uOldCpl)
4842 {
4843 pCtx->ss.Sel = 0 | uNewCpl;
4844 pCtx->ss.ValidSel = 0 | uNewCpl;
4845 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4846 pCtx->ss.u32Limit = UINT32_MAX;
4847 pCtx->ss.u64Base = 0;
4848 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4849 }
4850 pCtx->rsp = uNewRsp - cbStackFrame;
4851 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4852 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4853 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4854 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4855 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4856 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4857 pCtx->rip = uNewRip;
4858
4859 fEfl &= ~fEflToClear;
4860 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4861
4862 if (fFlags & IEM_XCPT_FLAGS_CR2)
4863 pCtx->cr2 = uCr2;
4864
4865 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4866 iemRaiseXcptAdjustState(pCtx, u8Vector);
4867
4868 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4869}
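
/* Exposition-only sketch, not part of the build: the long-mode frame is five
   qwords (SS, RSP, RFLAGS, CS, RIP) plus an optional error code, pushed below
   a 16-byte aligned RSP, exactly as the code above maps it.  The iemExample*
   name below is made up for illustration. */
#if 0
static unsigned long long iemExampleLongModeFrameTop(unsigned long long uRsp, int fHasErrCd)
{
    unsigned const cbFrame = 8 * (5 + (fHasErrCd ? 1 : 0));
    uRsp &= ~(unsigned long long)0xf;   /* align the stack first */
    return uRsp - cbFrame;              /* lowest slot: error code (if any) or RIP */
}
#endif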
4870
4871
4872/**
4873 * Implements exceptions and interrupts.
4874 *
4875 * All exceptions and interrupts go through this function!
4876 *
4877 * @returns VBox strict status code.
4878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4879 * @param cbInstr The number of bytes to offset rIP by in the return
4880 * address.
4881 * @param u8Vector The interrupt / exception vector number.
4882 * @param fFlags The flags.
4883 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4884 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4885 */
4886DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
4887iemRaiseXcptOrInt(PVMCPU pVCpu,
4888 uint8_t cbInstr,
4889 uint8_t u8Vector,
4890 uint32_t fFlags,
4891 uint16_t uErr,
4892 uint64_t uCr2)
4893{
4894 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4895#ifdef IN_RING0
4896 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
4897 AssertRCReturn(rc, rc);
4898#endif
4899
4900#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4901 /*
4902 * Flush prefetch buffer
4903 */
4904 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4905#endif
4906
4907 /*
4908 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4909 */
4910 if ( pCtx->eflags.Bits.u1VM
4911 && pCtx->eflags.Bits.u2IOPL != 3
4912 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4913 && (pCtx->cr0 & X86_CR0_PE) )
4914 {
4915 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4916 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4917 u8Vector = X86_XCPT_GP;
4918 uErr = 0;
4919 }
4920#ifdef DBGFTRACE_ENABLED
4921 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4922 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4923 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
4924#endif
4925
4926 /*
4927 * Do recursion accounting.
4928 */
4929 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4930 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4931 if (pVCpu->iem.s.cXcptRecursions == 0)
4932 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4933 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4934 else
4935 {
4936 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4937 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4938
4939 /** @todo double and triple faults. */
4940 if (pVCpu->iem.s.cXcptRecursions >= 3)
4941 {
4942#ifdef DEBUG_bird
4943 AssertFailed();
4944#endif
4945 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4946 }
4947
4948 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4949 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4950 {
4951 ....
4952 } */
4953 }
4954 pVCpu->iem.s.cXcptRecursions++;
4955 pVCpu->iem.s.uCurXcpt = u8Vector;
4956 pVCpu->iem.s.fCurXcpt = fFlags;
4957
4958 /*
4959 * Extensive logging.
4960 */
4961#if defined(LOG_ENABLED) && defined(IN_RING3)
4962 if (LogIs3Enabled())
4963 {
4964 PVM pVM = pVCpu->CTX_SUFF(pVM);
4965 char szRegs[4096];
4966 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4967 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4968 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4969 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4970 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4971 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4972 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4973 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4974 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4975 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4976 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4977 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4978 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4979 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4980 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4981 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4982 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4983 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4984 " efer=%016VR{efer}\n"
4985 " pat=%016VR{pat}\n"
4986 " sf_mask=%016VR{sf_mask}\n"
4987 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4988 " lstar=%016VR{lstar}\n"
4989 " star=%016VR{star} cstar=%016VR{cstar}\n"
4990 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4991 );
4992
4993 char szInstr[256];
4994 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4995 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4996 szInstr, sizeof(szInstr), NULL);
4997 Log3(("%s%s\n", szRegs, szInstr));
4998 }
4999#endif /* LOG_ENABLED */
5000
5001 /*
5002 * Call the mode specific worker function.
5003 */
5004 VBOXSTRICTRC rcStrict;
5005 if (!(pCtx->cr0 & X86_CR0_PE))
5006 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5007 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5008 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5009 else
5010 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5011
5012 /* Flush the prefetch buffer. */
5013#ifdef IEM_WITH_CODE_TLB
5014 pVCpu->iem.s.pbInstrBuf = NULL;
5015#else
5016 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5017#endif
5018
5019 /*
5020 * Unwind.
5021 */
5022 pVCpu->iem.s.cXcptRecursions--;
5023 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5024 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5025 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5026 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5027 return rcStrict;
5028}
5029
5030#ifdef IEM_WITH_SETJMP
5031/**
5032 * See iemRaiseXcptOrInt. Will not return.
5033 */
5034IEM_STATIC DECL_NO_RETURN(void)
5035iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5036 uint8_t cbInstr,
5037 uint8_t u8Vector,
5038 uint32_t fFlags,
5039 uint16_t uErr,
5040 uint64_t uCr2)
5041{
5042 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5043 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5044}
5045#endif
5046
5047
5048/** \#DE - 00. */
5049DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5050{
5051 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5052}
5053
5054
5055/** \#DB - 01.
5056 * @note This automatically clears DR7.GD. */
5057DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5058{
5059 /** @todo set/clear RF. */
5060 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5061 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5062}
5063
5064
5065/** \#UD - 06. */
5066DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5067{
5068 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5069}
5070
5071
5072/** \#NM - 07. */
5073DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5074{
5075 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5076}
5077
5078
5079/** \#TS(err) - 0a. */
5080DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5081{
5082 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5083}
5084
5085
5086/** \#TS(tr) - 0a. */
5087DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5088{
5089 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5090 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5091}
5092
5093
5094/** \#TS(0) - 0a. */
5095DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5096{
5097 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5098 0, 0);
5099}
5100
5101
5102/** \#TS(sel) - 0a. */
5103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5104{
5105 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5106 uSel & X86_SEL_MASK_OFF_RPL, 0);
5107}
5108
5109
5110/** \#NP(err) - 0b. */
5111DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5112{
5113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5114}
5115
5116
5117/** \#NP(seg) - 0b. */
5118DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5119{
5120 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5121 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5122}
5123
5124
5125/** \#NP(sel) - 0b. */
5126DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5127{
5128 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5129 uSel & ~X86_SEL_RPL, 0);
5130}
5131
5132
5133/** \#SS(seg) - 0c. */
5134DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5135{
5136 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5137 uSel & ~X86_SEL_RPL, 0);
5138}
5139
5140
5141/** \#SS(err) - 0c. */
5142DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5143{
5144 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5145}
5146
5147
5148/** \#GP(n) - 0d. */
5149DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5150{
5151 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5152}
5153
5154
5155/** \#GP(0) - 0d. */
5156DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5157{
5158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5159}
5160
5161#ifdef IEM_WITH_SETJMP
5162/** \#GP(0) - 0d. */
5163DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5164{
5165 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5166}
5167#endif
5168
5169
5170/** \#GP(sel) - 0d. */
5171DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5172{
5173 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5174 Sel & ~X86_SEL_RPL, 0);
5175}
5176
5177
5178/** \#GP(0) - 0d. */
5179DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5180{
5181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5182}
5183
5184
5185/** \#GP(sel) - 0d. */
5186DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5187{
5188 NOREF(iSegReg); NOREF(fAccess);
5189 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5190 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5191}
5192
5193#ifdef IEM_WITH_SETJMP
5194/** \#GP(sel) - 0d, longjmp. */
5195DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5196{
5197 NOREF(iSegReg); NOREF(fAccess);
5198 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5199 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5200}
5201#endif
5202
5203/** \#GP(sel) - 0d. */
5204DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5205{
5206 NOREF(Sel);
5207 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5208}
5209
5210#ifdef IEM_WITH_SETJMP
5211/** \#GP(sel) - 0d, longjmp. */
5212DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5213{
5214 NOREF(Sel);
5215 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5216}
5217#endif
5218
5219
5220/** \#GP(sel) - 0d. */
5221DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5222{
5223 NOREF(iSegReg); NOREF(fAccess);
5224 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5225}
5226
5227#ifdef IEM_WITH_SETJMP
5228/** \#GP(sel) - 0d, longjmp. */
5229DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5230 uint32_t fAccess)
5231{
5232 NOREF(iSegReg); NOREF(fAccess);
5233 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5234}
5235#endif
5236
5237
5238/** \#PF(n) - 0e. */
5239DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5240{
5241 uint16_t uErr;
5242 switch (rc)
5243 {
5244 case VERR_PAGE_NOT_PRESENT:
5245 case VERR_PAGE_TABLE_NOT_PRESENT:
5246 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5247 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5248 uErr = 0;
5249 break;
5250
5251 default:
5252 AssertMsgFailed(("%Rrc\n", rc));
5253 case VERR_ACCESS_DENIED:
5254 uErr = X86_TRAP_PF_P;
5255 break;
5256
5257 /** @todo reserved */
5258 }
5259
5260 if (pVCpu->iem.s.uCpl == 3)
5261 uErr |= X86_TRAP_PF_US;
5262
5263 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5264 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5265 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5266 uErr |= X86_TRAP_PF_ID;
5267
5268#if 0 /* This is so much nonsense, really. Why was it done like that? */
5269 /* Note! RW access callers reporting a WRITE protection fault, will clear
5270 the READ flag before calling. So, read-modify-write accesses (RW)
5271 can safely be reported as READ faults. */
5272 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5273 uErr |= X86_TRAP_PF_RW;
5274#else
5275 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5276 {
5277 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5278 uErr |= X86_TRAP_PF_RW;
5279 }
5280#endif
5281
5282 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5283 uErr, GCPtrWhere);
5284}
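
/* Exposition-only sketch, not part of the build: how the architectural #PF
   error code bits used above are composed - P=1, W/R=2, U/S=4, I/D=16.
   The iemExample* name below is made up for illustration. */
#if 0
static unsigned iemExamplePageFaultErrCd(int fProtViolation, int fWrite, int fUser, int fInstrFetch)
{
    unsigned uErr = 0;
    if (fProtViolation) uErr |= 1;  /* P:   protection violation (clear = not present) */
    if (fWrite)         uErr |= 2;  /* W/R: write access */
    if (fUser)          uErr |= 4;  /* U/S: access from CPL 3 */
    if (fInstrFetch)    uErr |= 16; /* I/D: instruction fetch (PAE/LM + NXE) */
    return uErr;
}
#endif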
5285
5286
5287/** \#MF(0) - 10. */
5288DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5289{
5290 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5291}
5292
5293
5294/** \#AC(0) - 11. */
5295DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5296{
5297 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5298}
5299
5300
5301/**
5302 * Macro for calling iemCImplRaiseDivideError().
5303 *
5304 * This enables us to add/remove arguments and force different levels of
5305 * inlining as we wish.
5306 *
5307 * @return Strict VBox status code.
5308 */
5309#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5310IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5311{
5312 NOREF(cbInstr);
5313 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5314}
5315
5316
5317/**
5318 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5319 *
5320 * This enables us to add/remove arguments and force different levels of
5321 * inlining as we wish.
5322 *
5323 * @return Strict VBox status code.
5324 */
5325#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5326IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5327{
5328 NOREF(cbInstr);
5329 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5330}
5331
5332
5333/**
5334 * Macro for calling iemCImplRaiseInvalidOpcode().
5335 *
5336 * This enables us to add/remove arguments and force different levels of
5337 * inlining as we wish.
5338 *
5339 * @return Strict VBox status code.
5340 */
5341#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5342IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5343{
5344 NOREF(cbInstr);
5345 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5346}
5347
5348
5349/** @} */
5350
5351
5352/*
5353 *
5354 * Helper routines.
5355 * Helper routines.
5356 * Helper routines.
5357 *
5358 */
5359
5360/**
5361 * Recalculates the effective operand size.
5362 *
5363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5364 */
5365IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5366{
5367 switch (pVCpu->iem.s.enmCpuMode)
5368 {
5369 case IEMMODE_16BIT:
5370 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5371 break;
5372 case IEMMODE_32BIT:
5373 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5374 break;
5375 case IEMMODE_64BIT:
5376 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5377 {
5378 case 0:
5379 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5380 break;
5381 case IEM_OP_PRF_SIZE_OP:
5382 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5383 break;
5384 case IEM_OP_PRF_SIZE_REX_W:
5385 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5386 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5387 break;
5388 }
5389 break;
5390 default:
5391 AssertFailed();
5392 }
5393}
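
/* Exposition-only sketch, not part of the build: the 64-bit operand size rules
   implemented by the switch above - REX.W wins over a 66h prefix, 66h alone
   selects 16-bit, otherwise the default size applies.  The iemExample* name
   below is made up for illustration. */
#if 0
static unsigned iemExampleEffOpSizeBits64(int fRexW, int fOpSizePrefix, unsigned cBitsDefault)
{
    if (fRexW)
        return 64;
    if (fOpSizePrefix)
        return 16;
    return cBitsDefault;    /* normally 32; 64 for near branches, pushes, etc. */
}
#endif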
5394
5395
5396/**
5397 * Sets the default operand size to 64-bit and recalculates the effective
5398 * operand size.
5399 *
5400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5401 */
5402IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5403{
5404 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5405 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5406 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5407 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5408 else
5409 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5410}
5411
5412
5413/*
5414 *
5415 * Common opcode decoders.
5416 * Common opcode decoders.
5417 * Common opcode decoders.
5418 *
5419 */
5420//#include <iprt/mem.h>
5421
5422/**
5423 * Used to add extra details about a stub case.
5424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5425 */
5426IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5427{
5428#if defined(LOG_ENABLED) && defined(IN_RING3)
5429 PVM pVM = pVCpu->CTX_SUFF(pVM);
5430 char szRegs[4096];
5431 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5432 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5433 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5434 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5435 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5436 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5437 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5438 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5439 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5440 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5441 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5442 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5443 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5444 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5445 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5446 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5447 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5448 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5449 " efer=%016VR{efer}\n"
5450 " pat=%016VR{pat}\n"
5451 " sf_mask=%016VR{sf_mask}\n"
5452 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5453 " lstar=%016VR{lstar}\n"
5454 " star=%016VR{star} cstar=%016VR{cstar}\n"
5455 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5456 );
5457
5458 char szInstr[256];
5459 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5460 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5461 szInstr, sizeof(szInstr), NULL);
5462
5463 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5464#else
5465 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5466#endif
5467}
5468
5469/**
5470 * Complains about a stub.
5471 *
5472 * Providing two versions of this macro, one for daily use and one for use when
5473 * working on IEM.
5474 */
5475#if 0
5476# define IEMOP_BITCH_ABOUT_STUB() \
5477 do { \
5478 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5479 iemOpStubMsg2(pVCpu); \
5480 RTAssertPanic(); \
5481 } while (0)
5482#else
5483# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5484#endif
5485
5486/** Stubs an opcode. */
5487#define FNIEMOP_STUB(a_Name) \
5488 FNIEMOP_DEF(a_Name) \
5489 { \
5490 IEMOP_BITCH_ABOUT_STUB(); \
5491 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5492 } \
5493 typedef int ignore_semicolon
5494
5495/** Stubs an opcode. */
5496#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5497 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5498 { \
5499 IEMOP_BITCH_ABOUT_STUB(); \
5500 NOREF(a_Name0); \
5501 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5502 } \
5503 typedef int ignore_semicolon
5504
5505/** Stubs an opcode which currently should raise \#UD. */
5506#define FNIEMOP_UD_STUB(a_Name) \
5507 FNIEMOP_DEF(a_Name) \
5508 { \
5509 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5510 return IEMOP_RAISE_INVALID_OPCODE(); \
5511 } \
5512 typedef int ignore_semicolon
5513
5514/** Stubs an opcode which currently should raise \#UD. */
5515#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5516 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5517 { \
5518 NOREF(a_Name0); \
5519 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5520 return IEMOP_RAISE_INVALID_OPCODE(); \
5521 } \
5522 typedef int ignore_semicolon
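
/* Exposition-only usage sketch, not part of the build: how decoder entries
   would typically be stubbed with the macros above.  The opcode function
   names are made up for illustration. */
#if 0
/** Opcode 0xXX - not implemented yet, complains and fails. */
FNIEMOP_STUB(iemOp_ExampleOpcode_Hypothetical);

/** Opcode 0xYY - not supported, raises \#UD. */
FNIEMOP_UD_STUB(iemOp_ExampleUdOpcode_Hypothetical);
#endif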
5523
5524
5525
5526/** @name Register Access.
5527 * @{
5528 */
5529
5530/**
5531 * Gets a reference (pointer) to the specified hidden segment register.
5532 *
5533 * @returns Hidden register reference.
5534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5535 * @param iSegReg The segment register.
5536 */
5537IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5538{
5539 Assert(iSegReg < X86_SREG_COUNT);
5540 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5541 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5542
5543#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5544 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5545 { /* likely */ }
5546 else
5547 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5548#else
5549 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5550#endif
5551 return pSReg;
5552}
5553
5554
5555/**
5556 * Ensures that the given hidden segment register is up to date.
5557 *
5558 * @returns Hidden register reference.
5559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5560 * @param pSReg The segment register.
5561 */
5562IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5563{
5564#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5565 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5566 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5567#else
5568 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5569 NOREF(pVCpu);
5570#endif
5571 return pSReg;
5572}
5573
5574
5575/**
5576 * Gets a reference (pointer) to the specified segment register (the selector
5577 * value).
5578 *
5579 * @returns Pointer to the selector variable.
5580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5581 * @param iSegReg The segment register.
5582 */
5583DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5584{
5585 Assert(iSegReg < X86_SREG_COUNT);
5586 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5587 return &pCtx->aSRegs[iSegReg].Sel;
5588}
5589
5590
5591/**
5592 * Fetches the selector value of a segment register.
5593 *
5594 * @returns The selector value.
5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5596 * @param iSegReg The segment register.
5597 */
5598DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5599{
5600 Assert(iSegReg < X86_SREG_COUNT);
5601 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5602}
5603
5604
5605/**
5606 * Gets a reference (pointer) to the specified general purpose register.
5607 *
5608 * @returns Register reference.
5609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5610 * @param iReg The general purpose register.
5611 */
5612DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5613{
5614 Assert(iReg < 16);
5615 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5616 return &pCtx->aGRegs[iReg];
5617}
5618
5619
5620/**
5621 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5622 *
5623 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5624 *
5625 * @returns Register reference.
5626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5627 * @param iReg The register.
5628 */
5629DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5630{
5631 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5632 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5633 {
5634 Assert(iReg < 16);
5635 return &pCtx->aGRegs[iReg].u8;
5636 }
5637 /* high 8-bit register. */
5638 Assert(iReg < 8);
5639 return &pCtx->aGRegs[iReg & 3].bHi;
5640}
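
/* Exposition-only sketch, not part of the build: the 8-bit register aliasing
   handled above - without a REX prefix, encodings 4-7 are the high bytes
   AH/CH/DH/BH of RAX/RCX/RDX/RBX, while any REX prefix remaps them to
   SPL/BPL/SIL/DIL.  The iemExample* name below is made up for illustration. */
#if 0
static const char *iemExampleU8RegName(unsigned iReg, int fHasRexPrefix)
{
    static const char * const s_apszLow[16] =
    {
        "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
        "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
    };
    static const char * const s_apszHigh[4] = { "ah", "ch", "dh", "bh" };
    if (!fHasRexPrefix && iReg >= 4 && iReg <= 7)
        return s_apszHigh[iReg - 4];
    return s_apszLow[iReg & 15];
}
#endif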
5641
5642
5643/**
5644 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5645 *
5646 * @returns Register reference.
5647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5648 * @param iReg The register.
5649 */
5650DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5651{
5652 Assert(iReg < 16);
5653 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5654 return &pCtx->aGRegs[iReg].u16;
5655}
5656
5657
5658/**
5659 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5660 *
5661 * @returns Register reference.
5662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5663 * @param iReg The register.
5664 */
5665DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5666{
5667 Assert(iReg < 16);
5668 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5669 return &pCtx->aGRegs[iReg].u32;
5670}
5671
5672
5673/**
5674 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5675 *
5676 * @returns Register reference.
5677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5678 * @param iReg The register.
5679 */
5680DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5681{
5682 Assert(iReg < 16);
5683 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5684 return &pCtx->aGRegs[iReg].u64;
5685}
5686
5687
5688/**
5689 * Fetches the value of an 8-bit general purpose register.
5690 *
5691 * @returns The register value.
5692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5693 * @param iReg The register.
5694 */
5695DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5696{
5697 return *iemGRegRefU8(pVCpu, iReg);
5698}
5699
5700
5701/**
5702 * Fetches the value of a 16-bit general purpose register.
5703 *
5704 * @returns The register value.
5705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5706 * @param iReg The register.
5707 */
5708DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5709{
5710 Assert(iReg < 16);
5711 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5712}
5713
5714
5715/**
5716 * Fetches the value of a 32-bit general purpose register.
5717 *
5718 * @returns The register value.
5719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5720 * @param iReg The register.
5721 */
5722DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5723{
5724 Assert(iReg < 16);
5725 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5726}
5727
5728
5729/**
5730 * Fetches the value of a 64-bit general purpose register.
5731 *
5732 * @returns The register value.
5733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5734 * @param iReg The register.
5735 */
5736DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5737{
5738 Assert(iReg < 16);
5739 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5740}
5741
5742
5743/**
5744 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5745 *
5746 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5747 * segment limit.
5748 *
5749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5750 * @param offNextInstr The offset of the next instruction.
5751 */
5752IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5753{
5754 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5755 switch (pVCpu->iem.s.enmEffOpSize)
5756 {
5757 case IEMMODE_16BIT:
5758 {
5759 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5760 if ( uNewIp > pCtx->cs.u32Limit
5761 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5762 return iemRaiseGeneralProtectionFault0(pVCpu);
5763 pCtx->rip = uNewIp;
5764 break;
5765 }
5766
5767 case IEMMODE_32BIT:
5768 {
5769 Assert(pCtx->rip <= UINT32_MAX);
5770 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5771
5772 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5773 if (uNewEip > pCtx->cs.u32Limit)
5774 return iemRaiseGeneralProtectionFault0(pVCpu);
5775 pCtx->rip = uNewEip;
5776 break;
5777 }
5778
5779 case IEMMODE_64BIT:
5780 {
5781 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5782
5783 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5784 if (!IEM_IS_CANONICAL(uNewRip))
5785 return iemRaiseGeneralProtectionFault0(pVCpu);
5786 pCtx->rip = uNewRip;
5787 break;
5788 }
5789
5790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5791 }
5792
5793 pCtx->eflags.Bits.u1RF = 0;
5794
5795#ifndef IEM_WITH_CODE_TLB
5796 /* Flush the prefetch buffer. */
5797 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5798#endif
5799
5800 return VINF_SUCCESS;
5801}
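
/* Exposition-only sketch, not part of the build: with a 16-bit operand size
   the jump target is computed modulo 64K, matching the uint16_t arithmetic
   above; e.g. IP=0xfffe, len=2, disp=+5 yields 0x0005.  The iemExample* name
   below is made up for illustration. */
#if 0
static unsigned iemExampleRel8JumpTarget16(unsigned uIp, unsigned cbInstr, signed char offDisp)
{
    return (unsigned short)(uIp + cbInstr + offDisp);
}
#endif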
5802
5803
5804/**
5805 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5806 *
5807 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5808 * segment limit.
5809 *
5810 * @returns Strict VBox status code.
5811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5812 * @param offNextInstr The offset of the next instruction.
5813 */
5814IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5815{
5816 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5817 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5818
5819 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5820 if ( uNewIp > pCtx->cs.u32Limit
5821 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5822 return iemRaiseGeneralProtectionFault0(pVCpu);
5823 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5824 pCtx->rip = uNewIp;
5825 pCtx->eflags.Bits.u1RF = 0;
5826
5827#ifndef IEM_WITH_CODE_TLB
5828 /* Flush the prefetch buffer. */
5829 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5830#endif
5831
5832 return VINF_SUCCESS;
5833}
5834
5835
5836/**
5837 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5838 *
5839 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5840 * segment limit.
5841 *
5842 * @returns Strict VBox status code.
5843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5844 * @param offNextInstr The offset of the next instruction.
5845 */
5846IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5847{
5848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5849 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5850
5851 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5852 {
5853 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5854
5855 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5856 if (uNewEip > pCtx->cs.u32Limit)
5857 return iemRaiseGeneralProtectionFault0(pVCpu);
5858 pCtx->rip = uNewEip;
5859 }
5860 else
5861 {
5862 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5863
5864 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5865 if (!IEM_IS_CANONICAL(uNewRip))
5866 return iemRaiseGeneralProtectionFault0(pVCpu);
5867 pCtx->rip = uNewRip;
5868 }
5869 pCtx->eflags.Bits.u1RF = 0;
5870
5871#ifndef IEM_WITH_CODE_TLB
5872 /* Flush the prefetch buffer. */
5873 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5874#endif
5875
5876 return VINF_SUCCESS;
5877}
5878
5879
5880/**
5881 * Performs a near jump to the specified address.
5882 *
5883 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5884 * segment limit.
5885 *
5886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5887 * @param uNewRip The new RIP value.
5888 */
5889IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
5890{
5891 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5892 switch (pVCpu->iem.s.enmEffOpSize)
5893 {
5894 case IEMMODE_16BIT:
5895 {
5896 Assert(uNewRip <= UINT16_MAX);
5897 if ( uNewRip > pCtx->cs.u32Limit
5898 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5899 return iemRaiseGeneralProtectionFault0(pVCpu);
5900 /** @todo Test 16-bit jump in 64-bit mode. */
5901 pCtx->rip = uNewRip;
5902 break;
5903 }
5904
5905 case IEMMODE_32BIT:
5906 {
5907 Assert(uNewRip <= UINT32_MAX);
5908 Assert(pCtx->rip <= UINT32_MAX);
5909 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5910
5911 if (uNewRip > pCtx->cs.u32Limit)
5912 return iemRaiseGeneralProtectionFault0(pVCpu);
5913 pCtx->rip = uNewRip;
5914 break;
5915 }
5916
5917 case IEMMODE_64BIT:
5918 {
5919 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5920
5921 if (!IEM_IS_CANONICAL(uNewRip))
5922 return iemRaiseGeneralProtectionFault0(pVCpu);
5923 pCtx->rip = uNewRip;
5924 break;
5925 }
5926
5927 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5928 }
5929
5930 pCtx->eflags.Bits.u1RF = 0;
5931
5932#ifndef IEM_WITH_CODE_TLB
5933 /* Flush the prefetch buffer. */
5934 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5935#endif
5936
5937 return VINF_SUCCESS;
5938}
5939
5940
5941/**
5942 * Get the address of the top of the stack.
5943 *
5944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5945 * @param pCtx The CPU context from which SP/ESP/RSP should be
5946 * read.
5947 */
5948DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
5949{
5950 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5951 return pCtx->rsp;
5952 if (pCtx->ss.Attr.n.u1DefBig)
5953 return pCtx->esp;
5954 return pCtx->sp;
5955}
5956
5957
5958/**
5959 * Updates the RIP/EIP/IP to point to the next instruction.
5960 *
5961 * This function leaves the EFLAGS.RF flag alone.
5962 *
5963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5964 * @param cbInstr The number of bytes to add.
5965 */
5966IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
5967{
5968 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5969 switch (pVCpu->iem.s.enmCpuMode)
5970 {
5971 case IEMMODE_16BIT:
5972 Assert(pCtx->rip <= UINT16_MAX);
5973 pCtx->eip += cbInstr;
5974 pCtx->eip &= UINT32_C(0xffff);
5975 break;
5976
5977 case IEMMODE_32BIT:
5978 pCtx->eip += cbInstr;
5979 Assert(pCtx->rip <= UINT32_MAX);
5980 break;
5981
5982 case IEMMODE_64BIT:
5983 pCtx->rip += cbInstr;
5984 break;
5985 default: AssertFailed();
5986 }
5987}
5988
5989
5990#if 0
5991/**
5992 * Updates the RIP/EIP/IP to point to the next instruction.
5993 *
5994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5995 */
5996IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
5997{
5998 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
5999}
6000#endif
6001
6002
6003
6004/**
6005 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6006 *
6007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6008 * @param cbInstr The number of bytes to add.
6009 */
6010IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6011{
6012 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6013
6014 pCtx->eflags.Bits.u1RF = 0;
6015
6016 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6017#if ARCH_BITS >= 64
6018 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6019 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6020 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6021#else
6022 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6023 pCtx->rip += cbInstr;
6024 else
6025 {
6026 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6027 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6028 }
6029#endif
6030}
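
/* Exposition-only sketch, not part of the build: the branch-free advance above
   masks the incremented RIP with 0xffff / 0xffffffff / ~0 selected by CPU mode
   so IP and EIP wrap correctly.  The iemExample* name below is made up for
   illustration. */
#if 0
static unsigned long long iemExampleAdvanceRip(unsigned long long uRip, unsigned cbInstr, unsigned iMode /* 0=16, 1=32, 2=64 */)
{
    static unsigned long long const s_aMasks[3] = { 0xffffULL, 0xffffffffULL, ~(unsigned long long)0 };
    return (uRip + cbInstr) & s_aMasks[iMode];
}
#endif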
6031
6032
6033/**
6034 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6035 *
6036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6037 */
6038IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6039{
6040 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6041}
6042
6043
6044/**
6045 * Adds to the stack pointer.
6046 *
6047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6048 * @param pCtx The CPU context which SP/ESP/RSP should be
6049 * updated.
6050 * @param cbToAdd The number of bytes to add (8-bit!).
6051 */
6052DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6053{
6054 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6055 pCtx->rsp += cbToAdd;
6056 else if (pCtx->ss.Attr.n.u1DefBig)
6057 pCtx->esp += cbToAdd;
6058 else
6059 pCtx->sp += cbToAdd;
6060}
6061
6062
6063/**
6064 * Subtracts from the stack pointer.
6065 *
6066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6067 * @param pCtx The CPU context which SP/ESP/RSP should be
6068 * updated.
6069 * @param cbToSub The number of bytes to subtract (8-bit!).
6070 */
6071DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6072{
6073 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6074 pCtx->rsp -= cbToSub;
6075 else if (pCtx->ss.Attr.n.u1DefBig)
6076 pCtx->esp -= cbToSub;
6077 else
6078 pCtx->sp -= cbToSub;
6079}
6080
6081
6082/**
6083 * Adds to the temporary stack pointer.
6084 *
6085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6086 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6087 * @param cbToAdd The number of bytes to add (16-bit).
6088 * @param pCtx Where to get the current stack mode.
6089 */
6090DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6091{
6092 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6093 pTmpRsp->u += cbToAdd;
6094 else if (pCtx->ss.Attr.n.u1DefBig)
6095 pTmpRsp->DWords.dw0 += cbToAdd;
6096 else
6097 pTmpRsp->Words.w0 += cbToAdd;
6098}
6099
6100
6101/**
6102 * Subtracts from the temporary stack pointer.
6103 *
6104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6105 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6106 * @param cbToSub The number of bytes to subtract.
6107 * @param pCtx Where to get the current stack mode.
6108 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6109 * expecting that.
6110 */
6111DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6112{
6113 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6114 pTmpRsp->u -= cbToSub;
6115 else if (pCtx->ss.Attr.n.u1DefBig)
6116 pTmpRsp->DWords.dw0 -= cbToSub;
6117 else
6118 pTmpRsp->Words.w0 -= cbToSub;
6119}
6120
6121
6122/**
6123 * Calculates the effective stack address for a push of the specified size as
6124 * well as the new RSP value (upper bits may be masked).
6125 *
6126 * @returns Effective stack address for the push.
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 * @param pCtx Where to get the current stack mode.
6129 * @param cbItem The size of the stack item to push.
6130 * @param puNewRsp Where to return the new RSP value.
6131 */
6132DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6133{
6134 RTUINT64U uTmpRsp;
6135 RTGCPTR GCPtrTop;
6136 uTmpRsp.u = pCtx->rsp;
6137
6138 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6139 GCPtrTop = uTmpRsp.u -= cbItem;
6140 else if (pCtx->ss.Attr.n.u1DefBig)
6141 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6142 else
6143 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6144 *puNewRsp = uTmpRsp.u;
6145 return GCPtrTop;
6146}
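
/* Exposition-only sketch, not part of the build: the push address is the stack
   pointer minus the item size, using only SP or ESP when SS is a 16-bit or
   32-bit stack, as the function above does via the RTUINT64U union.  The
   iemExample* name below is made up for illustration. */
#if 0
static unsigned long long iemExamplePushAddress(unsigned long long uRsp, unsigned cbItem, unsigned cStackBits /* 16, 32 or 64 */)
{
    if (cStackBits == 64)
        return uRsp - cbItem;                   /* full RSP */
    if (cStackBits == 32)
        return (unsigned int)(uRsp - cbItem);   /* ESP, wraps at 4G */
    return (unsigned short)(uRsp - cbItem);     /* SP, wraps at 64K */
}
#endif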
6147
6148
6149/**
6150 * Gets the current stack pointer and calculates the value after a pop of the
6151 * specified size.
6152 *
6153 * @returns Current stack pointer.
6154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6155 * @param pCtx Where to get the current stack mode.
6156 * @param cbItem The size of the stack item to pop.
6157 * @param puNewRsp Where to return the new RSP value.
6158 */
6159DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6160{
6161 RTUINT64U uTmpRsp;
6162 RTGCPTR GCPtrTop;
6163 uTmpRsp.u = pCtx->rsp;
6164
6165 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6166 {
6167 GCPtrTop = uTmpRsp.u;
6168 uTmpRsp.u += cbItem;
6169 }
6170 else if (pCtx->ss.Attr.n.u1DefBig)
6171 {
6172 GCPtrTop = uTmpRsp.DWords.dw0;
6173 uTmpRsp.DWords.dw0 += cbItem;
6174 }
6175 else
6176 {
6177 GCPtrTop = uTmpRsp.Words.w0;
6178 uTmpRsp.Words.w0 += cbItem;
6179 }
6180 *puNewRsp = uTmpRsp.u;
6181 return GCPtrTop;
6182}
6183
6184
6185/**
6186 * Calculates the effective stack address for a push of the specified size as
6187 * well as the new temporary RSP value (upper bits may be masked).
6188 *
6189 * @returns Effective stack address for the push.
6190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6191 * @param pCtx Where to get the current stack mode.
6192 * @param pTmpRsp The temporary stack pointer. This is updated.
6193 * @param cbItem The size of the stack item to push.
6194 */
6195DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6196{
6197 RTGCPTR GCPtrTop;
6198
6199 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6200 GCPtrTop = pTmpRsp->u -= cbItem;
6201 else if (pCtx->ss.Attr.n.u1DefBig)
6202 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6203 else
6204 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6205 return GCPtrTop;
6206}
6207
6208
6209/**
6210 * Gets the effective stack address for a pop of the specified size and
6211 * calculates and updates the temporary RSP.
6212 *
6213 * @returns Current stack pointer.
6214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6215 * @param pCtx Where to get the current stack mode.
6216 * @param pTmpRsp The temporary stack pointer. This is updated.
6217 * @param cbItem The size of the stack item to pop.
6218 */
6219DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6220{
6221 RTGCPTR GCPtrTop;
6222 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6223 {
6224 GCPtrTop = pTmpRsp->u;
6225 pTmpRsp->u += cbItem;
6226 }
6227 else if (pCtx->ss.Attr.n.u1DefBig)
6228 {
6229 GCPtrTop = pTmpRsp->DWords.dw0;
6230 pTmpRsp->DWords.dw0 += cbItem;
6231 }
6232 else
6233 {
6234 GCPtrTop = pTmpRsp->Words.w0;
6235 pTmpRsp->Words.w0 += cbItem;
6236 }
6237 return GCPtrTop;
6238}
6239
6240/** @} */
6241
6242
6243/** @name FPU access and helpers.
6244 *
6245 * @{
6246 */
6247
6248
6249/**
6250 * Hook for preparing to use the host FPU.
6251 *
6252 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6253 *
6254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6255 */
6256DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6257{
6258#ifdef IN_RING3
6259 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6260#else
6261 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6262#endif
6263}
6264
6265
6266/**
6267 * Hook for preparing to use the host FPU for SSE.
6268 *
6269 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6270 *
6271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6272 */
6273DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6274{
6275 iemFpuPrepareUsage(pVCpu);
6276}
6277
6278
6279/**
6280 * Hook for actualizing the guest FPU state before the interpreter reads it.
6281 *
6282 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6283 *
6284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6285 */
6286DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6287{
6288#ifdef IN_RING3
6289 NOREF(pVCpu);
6290#else
6291 CPUMRZFpuStateActualizeForRead(pVCpu);
6292#endif
6293}
6294
6295
6296/**
6297 * Hook for actualizing the guest FPU state before the interpreter changes it.
6298 *
6299 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6300 *
6301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6302 */
6303DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6304{
6305#ifdef IN_RING3
6306 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6307#else
6308 CPUMRZFpuStateActualizeForChange(pVCpu);
6309#endif
6310}
6311
6312
6313/**
6314 * Hook for actualizing the guest XMM0..15 register state for read only.
6315 *
6316 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6317 *
6318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6319 */
6320DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6321{
6322#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6323 NOREF(pVCpu);
6324#else
6325 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6326#endif
6327}
6328
6329
6330/**
6331 * Hook for actualizing the guest XMM0..15 register state for read+write.
6332 *
6333 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6334 *
6335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6336 */
6337DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6338{
6339#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6340 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6341#else
6342 CPUMRZFpuStateActualizeForChange(pVCpu);
6343#endif
6344}
6345
6346
6347/**
6348 * Stores a QNaN value into an FPU register.
6349 *
6350 * @param pReg Pointer to the register.
6351 */
6352DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6353{
6354 pReg->au32[0] = UINT32_C(0x00000000);
6355 pReg->au32[1] = UINT32_C(0xc0000000);
6356 pReg->au16[4] = UINT16_C(0xffff);
6357}
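
/*
 * Illustrative note, not compiled: the three stores above assemble the x87
 * "real indefinite" QNaN - sign=1, exponent=0x7fff, mantissa with only the
 * integer and quiet bits set.  The struct below is a made-up stand-in for
 * RTFLOAT80U, used only to show the resulting bit pattern.
 */
#if 0
typedef struct EXAMPLEFLOAT80 { uint64_t u64Mantissa; uint16_t u16SignAndExp; } EXAMPLEFLOAT80;
static EXAMPLEFLOAT80 exampleRealIndefinite(void)
{
    EXAMPLEFLOAT80 Val;
    Val.u64Mantissa   = UINT64_C(0xc000000000000000);   /* au32[0]=0, au32[1]=0xc0000000 */
    Val.u16SignAndExp = UINT16_C(0xffff);               /* au16[4]=0xffff */
    return Val;
}
#endif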
6358
6359
6360/**
6361 * Updates the FOP, FPUCS, and FPUIP registers.
6362 *
6363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6364 * @param pCtx The CPU context.
6365 * @param pFpuCtx The FPU context.
6366 */
6367DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6368{
6369 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6370 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6371 /** @todo x87.CS and FPUIP need to be kept separately. */
6372 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6373 {
6374 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6375 * handled in real mode, based on the fnsave and fnstenv images. */
6376 pFpuCtx->CS = 0;
6377 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6378 }
6379 else
6380 {
6381 pFpuCtx->CS = pCtx->cs.Sel;
6382 pFpuCtx->FPUIP = pCtx->rip;
6383 }
6384}
6385
6386
6387/**
6388 * Updates the x87.DS and FPUDP registers.
6389 *
6390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6391 * @param pCtx The CPU context.
6392 * @param pFpuCtx The FPU context.
6393 * @param iEffSeg The effective segment register.
6394 * @param GCPtrEff The effective address relative to @a iEffSeg.
6395 */
6396DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6397{
6398 RTSEL sel;
6399 switch (iEffSeg)
6400 {
6401 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6402 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6403 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6404 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6405 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6406 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6407 default:
6408 AssertMsgFailed(("%d\n", iEffSeg));
6409 sel = pCtx->ds.Sel;
6410 }
6411 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6412 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6413 {
6414 pFpuCtx->DS = 0;
6415 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6416 }
6417 else
6418 {
6419 pFpuCtx->DS = sel;
6420 pFpuCtx->FPUDP = GCPtrEff;
6421 }
6422}
6423
6424
6425/**
6426 * Rotates the stack registers in the push direction.
6427 *
6428 * @param pFpuCtx The FPU context.
6429 * @remarks This is a complete waste of time, but fxsave stores the registers in
6430 * stack order.
6431 */
6432DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6433{
6434 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6435 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6436 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6437 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6438 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6439 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6440 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6441 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6442 pFpuCtx->aRegs[0].r80 = r80Tmp;
6443}
6444
6445
6446/**
6447 * Rotates the stack registers in the pop direction.
6448 *
6449 * @param pFpuCtx The FPU context.
6450 * @remarks This is a complete waste of time, but fxsave stores the registers in
6451 * stack order.
6452 */
6453DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6454{
6455 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6456 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6457 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6458 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6459 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6460 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6461 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6462 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6463 pFpuCtx->aRegs[7].r80 = r80Tmp;
6464}
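
/*
 * Illustrative sketch, not compiled: the two unrolled rotations above are a
 * one-slot rotation of an 8-entry array, written generically below (the
 * names and the long double element type are placeholders for this sketch).
 */
#if 0
static void exampleRotate(long double a[8], int fPushDirection)
{
    if (fPushDirection)
    {
        long double Tmp = a[7];                 /* push: everything moves one slot up, */
        for (int i = 7; i > 0; i--)             /* the old a[7] wraps around to a[0].  */
            a[i] = a[i - 1];
        a[0] = Tmp;
    }
    else
    {
        long double Tmp = a[0];                 /* pop: the mirror image. */
        for (int i = 0; i < 7; i++)
            a[i] = a[i + 1];
        a[7] = Tmp;
    }
}
#endif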
6465
6466
6467/**
6468 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
6469 * exception prevents it.
6470 *
6471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6472 * @param pResult The FPU operation result to push.
6473 * @param pFpuCtx The FPU context.
6474 */
6475IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6476{
6477 /* Update FSW and bail if there are pending exceptions afterwards. */
6478 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6479 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6480 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6481 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6482 {
6483 pFpuCtx->FSW = fFsw;
6484 return;
6485 }
6486
6487 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6488 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6489 {
6490 /* All is fine, push the actual value. */
6491 pFpuCtx->FTW |= RT_BIT(iNewTop);
6492 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6493 }
6494 else if (pFpuCtx->FCW & X86_FCW_IM)
6495 {
6496 /* Masked stack overflow, push QNaN. */
6497 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6498 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6499 }
6500 else
6501 {
6502 /* Raise stack overflow, don't push anything. */
6503 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6504 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6505 return;
6506 }
6507
6508 fFsw &= ~X86_FSW_TOP_MASK;
6509 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6510 pFpuCtx->FSW = fFsw;
6511
6512 iemFpuRotateStackPush(pFpuCtx);
6513}
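
/*
 * Illustrative arithmetic, not compiled: a push decrements the 3-bit TOP
 * field of FSW modulo 8, which the code above does by adding 7 and masking.
 * TOP occupies FSW bits 11-13, which is what the shift and mask below assume.
 */
#if 0
static uint16_t exampleFswTopAfterPush(uint16_t uFsw)
{
    uint16_t iTop = (uFsw >> 11) & 7;                       /* current TOP */
    iTop = (iTop + 7) & 7;                                  /* TOP - 1 mod 8, e.g. 0 -> 7 */
    return (uint16_t)((uFsw & ~(UINT16_C(7) << 11)) | (iTop << 11));
}
#endif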
6514
6515
6516/**
6517 * Stores a result in an FPU register and updates the FSW and FTW.
6518 *
6519 * @param pFpuCtx The FPU context.
6520 * @param pResult The result to store.
6521 * @param iStReg Which FPU register to store it in.
6522 */
6523IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6524{
6525 Assert(iStReg < 8);
6526 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6527 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6528 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6529 pFpuCtx->FTW |= RT_BIT(iReg);
6530 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6531}
6532
6533
6534/**
6535 * Only updates the FPU status word (FSW) with the result of the current
6536 * instruction.
6537 *
6538 * @param pFpuCtx The FPU context.
6539 * @param u16FSW The FSW output of the current instruction.
6540 */
6541IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6542{
6543 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6544 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6545}
6546
6547
6548/**
6549 * Pops one item off the FPU stack if no pending exception prevents it.
6550 *
6551 * @param pFpuCtx The FPU context.
6552 */
6553IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6554{
6555 /* Check pending exceptions. */
6556 uint16_t uFSW = pFpuCtx->FSW;
6557 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6558 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6559 return;
6560
6561 /* TOP--. */
6562 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6563 uFSW &= ~X86_FSW_TOP_MASK;
6564 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6565 pFpuCtx->FSW = uFSW;
6566
6567 /* Mark the previous ST0 as empty. */
6568 iOldTop >>= X86_FSW_TOP_SHIFT;
6569 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6570
6571 /* Rotate the registers. */
6572 iemFpuRotateStackPop(pFpuCtx);
6573}
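
/*
 * Illustrative sketch, not compiled: the pop above boils down to TOP+1 mod 8
 * (adding 9 << TOP_SHIFT works because 9 == 1 mod 8) plus clearing the FTW
 * bit of the slot that was ST(0).  The pending-exception check and the
 * register rotation are omitted here.
 */
#if 0
static void examplePopOne(uint16_t *puFsw, uint8_t *puFtw)
{
    uint16_t iOldTop = (*puFsw >> 11) & 7;
    uint16_t iNewTop = (iOldTop + 1) & 7;
    *puFsw = (uint16_t)((*puFsw & ~(UINT16_C(7) << 11)) | (iNewTop << 11));
    *puFtw &= (uint8_t)~(1 << iOldTop);                     /* old ST(0) is now empty */
}
#endif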
6574
6575
6576/**
6577 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
6578 *
6579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6580 * @param pResult The FPU operation result to push.
6581 */
6582IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6583{
6584 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6585 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6586 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6587 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6588}
6589
6590
6591/**
6592 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
6593 * and sets FPUDP and FPUDS.
6594 *
6595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6596 * @param pResult The FPU operation result to push.
6597 * @param iEffSeg The effective segment register.
6598 * @param GCPtrEff The effective address relative to @a iEffSeg.
6599 */
6600IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6601{
6602 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6603 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6604 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6605 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6606 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6607}
6608
6609
6610/**
6611 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6612 * unless a pending exception prevents it.
6613 *
6614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6615 * @param pResult The FPU operation result to store and push.
6616 */
6617IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6618{
6619 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6620 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6621 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6622
6623 /* Update FSW and bail if there are pending exceptions afterwards. */
6624 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6625 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6626 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6627 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6628 {
6629 pFpuCtx->FSW = fFsw;
6630 return;
6631 }
6632
6633 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6634 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6635 {
6636 /* All is fine, push the actual value. */
6637 pFpuCtx->FTW |= RT_BIT(iNewTop);
6638 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6639 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6640 }
6641 else if (pFpuCtx->FCW & X86_FCW_IM)
6642 {
6643 /* Masked stack overflow, push QNaN. */
6644 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6645 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6646 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6647 }
6648 else
6649 {
6650 /* Raise stack overflow, don't push anything. */
6651 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6652 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6653 return;
6654 }
6655
6656 fFsw &= ~X86_FSW_TOP_MASK;
6657 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6658 pFpuCtx->FSW = fFsw;
6659
6660 iemFpuRotateStackPush(pFpuCtx);
6661}
6662
6663
6664/**
6665 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6666 * FOP.
6667 *
6668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6669 * @param pResult The result to store.
6670 * @param iStReg Which FPU register to store it in.
6671 */
6672IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6673{
6674 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6675 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6676 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6677 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6678}
6679
6680
6681/**
6682 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6683 * FOP, and then pops the stack.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 * @param pResult The result to store.
6687 * @param iStReg Which FPU register to store it in.
6688 */
6689IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6690{
6691 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6692 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6693 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6694 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6695 iemFpuMaybePopOne(pFpuCtx);
6696}
6697
6698
6699/**
6700 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6701 * FPUDP, and FPUDS.
6702 *
6703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6704 * @param pResult The result to store.
6705 * @param iStReg Which FPU register to store it in.
6706 * @param iEffSeg The effective memory operand selector register.
6707 * @param GCPtrEff The effective memory operand offset.
6708 */
6709IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6710 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6711{
6712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6713 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6714 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6715 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6716 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6717}
6718
6719
6720/**
6721 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6722 * FPUDP, and FPUDS, and then pops the stack.
6723 *
6724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6725 * @param pResult The result to store.
6726 * @param iStReg Which FPU register to store it in.
6727 * @param iEffSeg The effective memory operand selector register.
6728 * @param GCPtrEff The effective memory operand offset.
6729 */
6730IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6731 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6732{
6733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6734 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6735 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6736 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6737 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6738 iemFpuMaybePopOne(pFpuCtx);
6739}
6740
6741
6742/**
6743 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6744 *
6745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6746 */
6747IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6748{
6749 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6750 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6751 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6752}
6753
6754
6755/**
6756 * Marks the specified stack register as free (for FFREE).
6757 *
6758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6759 * @param iStReg The register to free.
6760 */
6761IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6762{
6763 Assert(iStReg < 8);
6764 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6765 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6766 pFpuCtx->FTW &= ~RT_BIT(iReg);
6767}
6768
6769
6770/**
6771 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6772 *
6773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6774 */
6775IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6776{
6777 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6778 uint16_t uFsw = pFpuCtx->FSW;
6779 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6780 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6781 uFsw &= ~X86_FSW_TOP_MASK;
6782 uFsw |= uTop;
6783 pFpuCtx->FSW = uFsw;
6784}
6785
6786
6787/**
6788 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6789 *
6790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6791 */
6792IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6793{
6794 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6795 uint16_t uFsw = pFpuCtx->FSW;
6796 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6797 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6798 uFsw &= ~X86_FSW_TOP_MASK;
6799 uFsw |= uTop;
6800 pFpuCtx->FSW = uFsw;
6801}
6802
6803
6804/**
6805 * Updates the FSW, FOP, FPUIP, and FPUCS.
6806 *
6807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6808 * @param u16FSW The FSW from the current instruction.
6809 */
6810IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6811{
6812 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6813 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6814 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6815 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6816}
6817
6818
6819/**
6820 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6821 *
6822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6823 * @param u16FSW The FSW from the current instruction.
6824 */
6825IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6826{
6827 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6828 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6829 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6830 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6831 iemFpuMaybePopOne(pFpuCtx);
6832}
6833
6834
6835/**
6836 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6837 *
6838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6839 * @param u16FSW The FSW from the current instruction.
6840 * @param iEffSeg The effective memory operand selector register.
6841 * @param GCPtrEff The effective memory operand offset.
6842 */
6843IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6844{
6845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6846 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6847 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6848 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6849 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6850}
6851
6852
6853/**
6854 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6855 *
6856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6857 * @param u16FSW The FSW from the current instruction.
6858 */
6859IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
6860{
6861 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6862 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6863 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6864 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6865 iemFpuMaybePopOne(pFpuCtx);
6866 iemFpuMaybePopOne(pFpuCtx);
6867}
6868
6869
6870/**
6871 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
6872 *
6873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6874 * @param u16FSW The FSW from the current instruction.
6875 * @param iEffSeg The effective memory operand selector register.
6876 * @param GCPtrEff The effective memory operand offset.
6877 */
6878IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6879{
6880 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6881 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6882 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6883 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6884 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6885 iemFpuMaybePopOne(pFpuCtx);
6886}
6887
6888
6889/**
6890 * Worker routine for raising an FPU stack underflow exception.
6891 *
6892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6893 * @param pFpuCtx The FPU context.
6894 * @param iStReg The stack register being accessed.
6895 */
6896IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
6897{
6898 Assert(iStReg < 8 || iStReg == UINT8_MAX);
6899 if (pFpuCtx->FCW & X86_FCW_IM)
6900 {
6901 /* Masked underflow. */
6902 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6903 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6904 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6905 if (iStReg != UINT8_MAX)
6906 {
6907 pFpuCtx->FTW |= RT_BIT(iReg);
6908 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6909 }
6910 }
6911 else
6912 {
6913 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6914 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6915 }
6916}
6917
6918
6919/**
6920 * Raises an FPU stack underflow exception.
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 * @param iStReg The destination register that should be loaded
6924 * with QNaN if \#IS is masked. Specify
6925 * UINT8_MAX if none (like for fcom).
6926 */
6927DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
6928{
6929 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6930 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6931 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6932 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6933}
6934
6935
6936DECL_NO_INLINE(IEM_STATIC, void)
6937iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6938{
6939 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6940 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6941 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6942 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6943 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6944}
6945
6946
6947DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
6948{
6949 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6950 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6951 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6952 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6953 iemFpuMaybePopOne(pFpuCtx);
6954}
6955
6956
6957DECL_NO_INLINE(IEM_STATIC, void)
6958iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6959{
6960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6961 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6962 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6963 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6964 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6965 iemFpuMaybePopOne(pFpuCtx);
6966}
6967
6968
6969DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
6970{
6971 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6972 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6973 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6974 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
6975 iemFpuMaybePopOne(pFpuCtx);
6976 iemFpuMaybePopOne(pFpuCtx);
6977}
6978
6979
6980DECL_NO_INLINE(IEM_STATIC, void)
6981iemFpuStackPushUnderflow(PVMCPU pVCpu)
6982{
6983 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6984 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6985 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6986
6987 if (pFpuCtx->FCW & X86_FCW_IM)
6988 {
6989 /* Masked underflow - Push QNaN. */
6990 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6991 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6992 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6993 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6994 pFpuCtx->FTW |= RT_BIT(iNewTop);
6995 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6996 iemFpuRotateStackPush(pFpuCtx);
6997 }
6998 else
6999 {
7000 /* Exception pending - don't change TOP or the register stack. */
7001 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7002 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7003 }
7004}
7005
7006
7007DECL_NO_INLINE(IEM_STATIC, void)
7008iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7009{
7010 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7011 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7012 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7013
7014 if (pFpuCtx->FCW & X86_FCW_IM)
7015 {
7016 /* Masked underflow - Push QNaN. */
7017 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7018 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7019 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7020 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7021 pFpuCtx->FTW |= RT_BIT(iNewTop);
7022 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7023 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7024 iemFpuRotateStackPush(pFpuCtx);
7025 }
7026 else
7027 {
7028 /* Exception pending - don't change TOP or the register stack. */
7029 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7030 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7031 }
7032}
7033
7034
7035/**
7036 * Worker routine for raising an FPU stack overflow exception on a push.
7037 *
7038 * @param pFpuCtx The FPU context.
7039 */
7040IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7041{
7042 if (pFpuCtx->FCW & X86_FCW_IM)
7043 {
7044 /* Masked overflow. */
7045 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7046 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7047 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7048 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7049 pFpuCtx->FTW |= RT_BIT(iNewTop);
7050 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7051 iemFpuRotateStackPush(pFpuCtx);
7052 }
7053 else
7054 {
7055 /* Exception pending - don't change TOP or the register stack. */
7056 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7057 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7058 }
7059}
7060
7061
7062/**
7063 * Raises an FPU stack overflow exception on a push.
7064 *
7065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7066 */
7067DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7068{
7069 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7070 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7071 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7072 iemFpuStackPushOverflowOnly(pFpuCtx);
7073}
7074
7075
7076/**
7077 * Raises an FPU stack overflow exception on a push with a memory operand.
7078 *
7079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7080 * @param iEffSeg The effective memory operand selector register.
7081 * @param GCPtrEff The effective memory operand offset.
7082 */
7083DECL_NO_INLINE(IEM_STATIC, void)
7084iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7085{
7086 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7087 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7088 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7089 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7090 iemFpuStackPushOverflowOnly(pFpuCtx);
7091}
7092
7093
7094IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7095{
7096 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7097 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7098 if (pFpuCtx->FTW & RT_BIT(iReg))
7099 return VINF_SUCCESS;
7100 return VERR_NOT_FOUND;
7101}
7102
7103
7104IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7105{
7106 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7107 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7108 if (pFpuCtx->FTW & RT_BIT(iReg))
7109 {
7110 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7111 return VINF_SUCCESS;
7112 }
7113 return VERR_NOT_FOUND;
7114}
7115
7116
7117IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7118 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7119{
7120 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7121 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7122 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7123 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7124 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7125 {
7126 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7127 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7128 return VINF_SUCCESS;
7129 }
7130 return VERR_NOT_FOUND;
7131}
7132
7133
7134IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7135{
7136 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7137 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7138 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7139 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7140 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7141 {
7142 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7143 return VINF_SUCCESS;
7144 }
7145 return VERR_NOT_FOUND;
7146}
7147
7148
7149/**
7150 * Updates the FPU exception status after FCW is changed.
7151 *
7152 * @param pFpuCtx The FPU context.
7153 */
7154IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7155{
7156 uint16_t u16Fsw = pFpuCtx->FSW;
7157 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7158 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7159 else
7160 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7161 pFpuCtx->FSW = u16Fsw;
7162}
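
/*
 * Illustrative sketch, not compiled: ES (FSW bit 7) and B (FSW bit 15) are
 * summary bits - set when any exception flag in FSW is not masked in FCW.
 * This simplified version only considers the six maskable exceptions (IE..PE)
 * in the low bits of both registers.
 */
#if 0
static uint16_t exampleRecalcEsB(uint16_t uFsw, uint16_t uFcw)
{
    if (uFsw & 0x3f & ~(uFcw & 0x3f))
        return (uint16_t)(uFsw | (1 << 7) | (1 << 15));     /* pending unmasked exception */
    return (uint16_t)(uFsw & ~((1 << 7) | (1 << 15)));
}
#endif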
7163
7164
7165/**
7166 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7167 *
7168 * @returns The full FTW.
7169 * @param pFpuCtx The FPU context.
7170 */
7171IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7172{
7173 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7174 uint16_t u16Ftw = 0;
7175 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7176 for (unsigned iSt = 0; iSt < 8; iSt++)
7177 {
7178 unsigned const iReg = (iSt + iTop) & 7;
7179 if (!(u8Ftw & RT_BIT(iReg)))
7180 u16Ftw |= 3 << (iReg * 2); /* empty */
7181 else
7182 {
7183 uint16_t uTag;
7184 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7185 if (pr80Reg->s.uExponent == 0x7fff)
7186 uTag = 2; /* Exponent is all 1's => Special. */
7187 else if (pr80Reg->s.uExponent == 0x0000)
7188 {
7189 if (pr80Reg->s.u64Mantissa == 0x0000)
7190 uTag = 1; /* All bits are zero => Zero. */
7191 else
7192 uTag = 2; /* Must be special. */
7193 }
7194 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7195 uTag = 0; /* Valid. */
7196 else
7197 uTag = 2; /* Must be special. */
7198
7199 u16Ftw |= uTag << (iReg * 2);
7200 }
7201 }
7202
7203 return u16Ftw;
7204}
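
/*
 * Illustrative sketch, not compiled: the per-register tag values produced by
 * the loop above - 0=valid, 1=zero, 2=special (NaN, infinity, denormal,
 * unnormal), 3=empty - derived from the exponent, mantissa and J bit.
 */
#if 0
static unsigned exampleTagFor(uint16_t uExponent, uint64_t u64Mantissa, int fEmpty)
{
    if (fEmpty)
        return 3;
    if (uExponent == 0x7fff)
        return 2;                                           /* NaN / infinity */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;                    /* zero or denormal */
    return (u64Mantissa >> 63) ? 0 : 2;                     /* J bit set: valid, else unnormal */
}
#endif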
7205
7206
7207/**
7208 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7209 *
7210 * @returns The compressed FTW.
7211 * @param u16FullFtw The full FTW to convert.
7212 */
7213IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7214{
7215 uint8_t u8Ftw = 0;
7216 for (unsigned i = 0; i < 8; i++)
7217 {
7218 if ((u16FullFtw & 3) != 3 /*empty*/)
7219 u8Ftw |= RT_BIT(i);
7220 u16FullFtw >>= 2;
7221 }
7222
7223 return u8Ftw;
7224}
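
/*
 * Worked example, not compiled: a full FTW of 0xFFF4 tags register 0 as
 * valid (00) and register 1 as zero (01) with everything else empty (11);
 * the loop above compresses that to 0x03 (bits 0 and 1 set).
 */
#if 0
static uint8_t exampleCompress(void)
{
    uint16_t u16Full = 0xFFF4;
    uint8_t  u8Comp  = 0;
    for (unsigned i = 0; i < 8; i++, u16Full >>= 2)
        if ((u16Full & 3) != 3)
            u8Comp |= (uint8_t)(1 << i);
    return u8Comp;                                          /* 0x03 */
}
#endif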
7225
7226/** @} */
7227
7228
7229/** @name Memory access.
7230 *
7231 * @{
7232 */
7233
7234
7235/**
7236 * Updates the IEMCPU::cbWritten counter if applicable.
7237 *
7238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7239 * @param fAccess The access being accounted for.
7240 * @param cbMem The access size.
7241 */
7242DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7243{
7244 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7245 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7246 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7247}
7248
7249
7250/**
7251 * Checks if the given segment can be written to, raising the appropriate
7252 * exception if not.
7253 *
7254 * @returns VBox strict status code.
7255 *
7256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7257 * @param pHid Pointer to the hidden register.
7258 * @param iSegReg The register number.
7259 * @param pu64BaseAddr Where to return the base address to use for the
7260 * segment. (In 64-bit code it may differ from the
7261 * base in the hidden segment.)
7262 */
7263IEM_STATIC VBOXSTRICTRC
7264iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7265{
7266 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7267 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7268 else
7269 {
7270 if (!pHid->Attr.n.u1Present)
7271 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7272
7273 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7274 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7275 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7276 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7277 *pu64BaseAddr = pHid->u64Base;
7278 }
7279 return VINF_SUCCESS;
7280}
7281
7282
7283/**
7284 * Checks if the given segment can be read from, raising the appropriate
7285 * exception if not.
7286 *
7287 * @returns VBox strict status code.
7288 *
7289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7290 * @param pHid Pointer to the hidden register.
7291 * @param iSegReg The register number.
7292 * @param pu64BaseAddr Where to return the base address to use for the
7293 * segment. (In 64-bit code it may differ from the
7294 * base in the hidden segment.)
7295 */
7296IEM_STATIC VBOXSTRICTRC
7297iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7298{
7299 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7300 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7301 else
7302 {
7303 if (!pHid->Attr.n.u1Present)
7304 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7305
7306 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7307 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7308 *pu64BaseAddr = pHid->u64Base;
7309 }
7310 return VINF_SUCCESS;
7311}
7312
7313
7314/**
7315 * Applies the segment limit, base and attributes.
7316 *
7317 * This may raise a \#GP or \#SS.
7318 *
7319 * @returns VBox strict status code.
7320 *
7321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7322 * @param fAccess The kind of access which is being performed.
7323 * @param iSegReg The index of the segment register to apply.
7324 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7325 * TSS, ++).
7326 * @param cbMem The access size.
7327 * @param pGCPtrMem Pointer to the guest memory address to apply
7328 * segmentation to. Input and output parameter.
7329 */
7330IEM_STATIC VBOXSTRICTRC
7331iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7332{
7333 if (iSegReg == UINT8_MAX)
7334 return VINF_SUCCESS;
7335
7336 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7337 switch (pVCpu->iem.s.enmCpuMode)
7338 {
7339 case IEMMODE_16BIT:
7340 case IEMMODE_32BIT:
7341 {
7342 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7343 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7344
7345 if ( pSel->Attr.n.u1Present
7346 && !pSel->Attr.n.u1Unusable)
7347 {
7348 Assert(pSel->Attr.n.u1DescType);
7349 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7350 {
7351 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7352 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7353 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7354
7355 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7356 {
7357 /** @todo CPL check. */
7358 }
7359
7360 /*
7361 * There are two kinds of data selectors, normal and expand down.
7362 */
7363 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7364 {
7365 if ( GCPtrFirst32 > pSel->u32Limit
7366 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7367 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7368 }
7369 else
7370 {
7371 /*
7372 * The upper boundary is defined by the B bit, not the G bit!
7373 */
7374 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7375 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7376 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7377 }
7378 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7379 }
7380 else
7381 {
7382
7383 /*
7384 * Code selectors can usually be used to read through; writing is
7385 * only permitted in real and V8086 mode.
7386 */
7387 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7388 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7389 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7390 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7391 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7392
7393 if ( GCPtrFirst32 > pSel->u32Limit
7394 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7395 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7396
7397 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7398 {
7399 /** @todo CPL check. */
7400 }
7401
7402 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7403 }
7404 }
7405 else
7406 return iemRaiseGeneralProtectionFault0(pVCpu);
7407 return VINF_SUCCESS;
7408 }
7409
7410 case IEMMODE_64BIT:
7411 {
7412 RTGCPTR GCPtrMem = *pGCPtrMem;
7413 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7414 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7415
7416 Assert(cbMem >= 1);
7417 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7418 return VINF_SUCCESS;
7419 return iemRaiseGeneralProtectionFault0(pVCpu);
7420 }
7421
7422 default:
7423 AssertFailedReturn(VERR_IEM_IPE_7);
7424 }
7425}
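
/*
 * Illustrative sketch, not compiled: the expand-down check used above.  For
 * expand-down data segments the valid offsets run from limit+1 up to 0xffff
 * (B=0) or 0xffffffff (B=1); both ends of the access must be in that window.
 */
#if 0
static int exampleExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, int fBigDefault)
{
    uint32_t const uUpper = fBigDefault ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper;
}
#endif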
7426
7427
7428/**
7429 * Translates a virtual address to a physical address and checks if we
7430 * can access the page as specified.
7431 *
7432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7433 * @param GCPtrMem The virtual address.
7434 * @param fAccess The intended access.
7435 * @param pGCPhysMem Where to return the physical address.
7436 */
7437IEM_STATIC VBOXSTRICTRC
7438iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7439{
7440 /** @todo Need a different PGM interface here. We're currently using
7441 * generic / REM interfaces. this won't cut it for R0 & RC. */
7442 RTGCPHYS GCPhys;
7443 uint64_t fFlags;
7444 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7445 if (RT_FAILURE(rc))
7446 {
7447 /** @todo Check unassigned memory in unpaged mode. */
7448 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7449 *pGCPhysMem = NIL_RTGCPHYS;
7450 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7451 }
7452
7453 /* If the page is writable and does not have the no-exec bit set, all
7454 access is allowed. Otherwise we'll have to check more carefully... */
7455 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7456 {
7457 /* Write to read only memory? */
7458 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7459 && !(fFlags & X86_PTE_RW)
7460 && ( pVCpu->iem.s.uCpl != 0
7461 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7462 {
7463 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7464 *pGCPhysMem = NIL_RTGCPHYS;
7465 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7466 }
7467
7468 /* Kernel memory accessed by userland? */
7469 if ( !(fFlags & X86_PTE_US)
7470 && pVCpu->iem.s.uCpl == 3
7471 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7472 {
7473 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7474 *pGCPhysMem = NIL_RTGCPHYS;
7475 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7476 }
7477
7478 /* Executing non-executable memory? */
7479 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7480 && (fFlags & X86_PTE_PAE_NX)
7481 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7482 {
7483 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7484 *pGCPhysMem = NIL_RTGCPHYS;
7485 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7486 VERR_ACCESS_DENIED);
7487 }
7488 }
7489
7490 /*
7491 * Set the dirty / access flags.
7492 * ASSUMES this is set when the address is translated rather than on commit...
7493 */
7494 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7495 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7496 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7497 {
7498 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7499 AssertRC(rc2);
7500 }
7501
7502 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7503 *pGCPhysMem = GCPhys;
7504 return VINF_SUCCESS;
7505}
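
/*
 * Illustrative sketch, not compiled: the three rejection cases checked above,
 * reduced to a predicate over the relevant PTE bits and access type.  It
 * leaves out the IEM_ACCESS_WHAT_SYS exemption for the supervisor-page check,
 * and the parameter names are made up for this sketch.
 */
#if 0
static int exampleAccessAllowed(int fPteWritable, int fPteUser, int fPteNoExec,
                                int fWrite, int fExec, unsigned uCpl, int fCr0Wp, int fEferNxe)
{
    if (fWrite && !fPteWritable && (uCpl != 0 || fCr0Wp))
        return 0;                                           /* write to a read-only page */
    if (!fPteUser && uCpl == 3)
        return 0;                                           /* user access to a supervisor page */
    if (fExec && fPteNoExec && fEferNxe)
        return 0;                                           /* fetch from a no-execute page */
    return 1;
}
#endif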
7506
7507
7508
7509/**
7510 * Maps a physical page.
7511 *
7512 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param GCPhysMem The physical address.
7515 * @param fAccess The intended access.
7516 * @param ppvMem Where to return the mapping address.
7517 * @param pLock The PGM lock.
7518 */
7519IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7520{
7521#ifdef IEM_VERIFICATION_MODE_FULL
7522 /* Force the alternative path so we can ignore writes. */
7523 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7524 {
7525 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7526 {
7527 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7528 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7529 if (RT_FAILURE(rc2))
7530 pVCpu->iem.s.fProblematicMemory = true;
7531 }
7532 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7533 }
7534#endif
7535#ifdef IEM_LOG_MEMORY_WRITES
7536 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7537 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7538#endif
7539#ifdef IEM_VERIFICATION_MODE_MINIMAL
7540 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7541#endif
7542
7543 /** @todo This API may require some improvement later. A private deal with PGM
7544 * regarding locking and unlocking needs to be struck. A couple of TLBs
7545 * living in PGM, but with publicly accessible inlined access methods
7546 * could perhaps be an even better solution. */
7547 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7548 GCPhysMem,
7549 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7550 pVCpu->iem.s.fBypassHandlers,
7551 ppvMem,
7552 pLock);
7553 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7554 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7555
7556#ifdef IEM_VERIFICATION_MODE_FULL
7557 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7558 pVCpu->iem.s.fProblematicMemory = true;
7559#endif
7560 return rc;
7561}
7562
7563
7564/**
7565 * Unmap a page previously mapped by iemMemPageMap.
7566 *
7567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7568 * @param GCPhysMem The physical address.
7569 * @param fAccess The intended access.
7570 * @param pvMem What iemMemPageMap returned.
7571 * @param pLock The PGM lock.
7572 */
7573DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7574{
7575 NOREF(pVCpu);
7576 NOREF(GCPhysMem);
7577 NOREF(fAccess);
7578 NOREF(pvMem);
7579 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7580}
7581
7582
7583/**
7584 * Looks up a memory mapping entry.
7585 *
7586 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7588 * @param pvMem The memory address.
7589 * @param fAccess The access flags to match.
7590 */
7591DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7592{
7593 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7594 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7595 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7596 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7597 return 0;
7598 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7599 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7600 return 1;
7601 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7602 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7603 return 2;
7604 return VERR_NOT_FOUND;
7605}
7606
7607
7608/**
7609 * Finds a free memmap entry when using iNextMapping doesn't work.
7610 *
7611 * @returns Memory mapping index, 1024 on failure.
7612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7613 */
7614IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7615{
7616 /*
7617 * The easy case.
7618 */
7619 if (pVCpu->iem.s.cActiveMappings == 0)
7620 {
7621 pVCpu->iem.s.iNextMapping = 1;
7622 return 0;
7623 }
7624
7625 /* There should be enough mappings for all instructions. */
7626 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7627
7628 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7629 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7630 return i;
7631
7632 AssertFailedReturn(1024);
7633}
7634
7635
7636/**
7637 * Commits a bounce buffer that needs writing back and unmaps it.
7638 *
7639 * @returns Strict VBox status code.
7640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7641 * @param iMemMap The index of the buffer to commit.
7642 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7643 * Always false in ring-3, obviously.
7644 */
7645IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7646{
7647 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7648 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7649#ifdef IN_RING3
7650 Assert(!fPostponeFail);
7651#endif
7652
7653 /*
7654 * Do the writing.
7655 */
7656#ifndef IEM_VERIFICATION_MODE_MINIMAL
7657 PVM pVM = pVCpu->CTX_SUFF(pVM);
7658 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7659 && !IEM_VERIFICATION_ENABLED(pVCpu))
7660 {
7661 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7662 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7663 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7664 if (!pVCpu->iem.s.fBypassHandlers)
7665 {
7666 /*
7667 * Carefully and efficiently dealing with access handler return
7668 * codes makes this a little bloated.
7669 */
7670 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7671 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7672 pbBuf,
7673 cbFirst,
7674 PGMACCESSORIGIN_IEM);
7675 if (rcStrict == VINF_SUCCESS)
7676 {
7677 if (cbSecond)
7678 {
7679 rcStrict = PGMPhysWrite(pVM,
7680 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7681 pbBuf + cbFirst,
7682 cbSecond,
7683 PGMACCESSORIGIN_IEM);
7684 if (rcStrict == VINF_SUCCESS)
7685 { /* nothing */ }
7686 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7687 {
7688 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7689 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7690 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7691 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7692 }
7693# ifndef IN_RING3
7694 else if (fPostponeFail)
7695 {
7696 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7699 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7700 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7701 return iemSetPassUpStatus(pVCpu, rcStrict);
7702 }
7703# endif
7704 else
7705 {
7706 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7707 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7708 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7709 return rcStrict;
7710 }
7711 }
7712 }
7713 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7714 {
7715 if (!cbSecond)
7716 {
7717 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7718 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7719 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7720 }
7721 else
7722 {
7723 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7724 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7725 pbBuf + cbFirst,
7726 cbSecond,
7727 PGMACCESSORIGIN_IEM);
7728 if (rcStrict2 == VINF_SUCCESS)
7729 {
7730 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7731 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7732 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7733 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7734 }
7735 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7736 {
7737 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7738 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7740 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7741 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7742 }
7743# ifndef IN_RING3
7744 else if (fPostponeFail)
7745 {
7746 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7747 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7748 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7749 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7750 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7751 return iemSetPassUpStatus(pVCpu, rcStrict);
7752 }
7753# endif
7754 else
7755 {
7756 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7759 return rcStrict2;
7760 }
7761 }
7762 }
7763# ifndef IN_RING3
7764 else if (fPostponeFail)
7765 {
7766 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7767 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7768 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7769 if (!cbSecond)
7770 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7771 else
7772 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7773 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7774 return iemSetPassUpStatus(pVCpu, rcStrict);
7775 }
7776# endif
7777 else
7778 {
7779 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7780 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7781 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7782 return rcStrict;
7783 }
7784 }
7785 else
7786 {
7787 /*
7788 * No access handlers, much simpler.
7789 */
7790 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7791 if (RT_SUCCESS(rc))
7792 {
7793 if (cbSecond)
7794 {
7795 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7796 if (RT_SUCCESS(rc))
7797 { /* likely */ }
7798 else
7799 {
7800 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7801 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7802 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7803 return rc;
7804 }
7805 }
7806 }
7807 else
7808 {
7809 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7812 return rc;
7813 }
7814 }
7815 }
7816#endif
7817
7818#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7819 /*
7820 * Record the write(s).
7821 */
7822 if (!pVCpu->iem.s.fNoRem)
7823 {
7824 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7825 if (pEvtRec)
7826 {
7827 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7828 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7829 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7830 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7831 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7832 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7833 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7834 }
7835 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7836 {
7837 pEvtRec = iemVerifyAllocRecord(pVCpu);
7838 if (pEvtRec)
7839 {
7840 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7841 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7842 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7843 memcpy(pEvtRec->u.RamWrite.ab,
7844 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7845 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7846 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7847 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7848 }
7849 }
7850 }
7851#endif
7852#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7853 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7854 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7855 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7856 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7857 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7858 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7859
7860 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7861 g_cbIemWrote = cbWrote;
7862 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
7863#endif
7864
7865 /*
7866 * Free the mapping entry.
7867 */
7868 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7869 Assert(pVCpu->iem.s.cActiveMappings != 0);
7870 pVCpu->iem.s.cActiveMappings--;
7871 return VINF_SUCCESS;
7872}
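
/*
 * Illustrative arithmetic, not compiled: how a linear access of cbMem bytes
 * at GCPtr splits into the two chunks committed above (4 KiB pages assumed).
 */
#if 0
static void exampleSplit(uint64_t GCPtr, size_t cbMem, size_t *pcbFirst, size_t *pcbSecond)
{
    size_t const cbFirst = 0x1000 - (size_t)(GCPtr & 0xfff);    /* bytes left on the first page */
    *pcbFirst  = cbFirst < cbMem ? cbFirst : cbMem;
    *pcbSecond = cbMem - *pcbFirst;                             /* 0 when the access doesn't cross */
}
#endif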
7873
7874
7875/**
7876 * iemMemMap worker that deals with a request crossing pages.
7877 */
7878IEM_STATIC VBOXSTRICTRC
7879iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
7880{
7881 /*
7882 * Do the address translations.
7883 */
7884 RTGCPHYS GCPhysFirst;
7885 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
7886 if (rcStrict != VINF_SUCCESS)
7887 return rcStrict;
7888
7889 RTGCPHYS GCPhysSecond;
7890 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
7891 fAccess, &GCPhysSecond);
7892 if (rcStrict != VINF_SUCCESS)
7893 return rcStrict;
7894 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
7895
7896 PVM pVM = pVCpu->CTX_SUFF(pVM);
7897#ifdef IEM_VERIFICATION_MODE_FULL
7898 /*
7899 * Detect problematic memory when verifying so we can select
7900 * the right execution engine. (TLB: Redo this.)
7901 */
7902 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7903 {
7904 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7905 if (RT_SUCCESS(rc2))
7906 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7907 if (RT_FAILURE(rc2))
7908 pVCpu->iem.s.fProblematicMemory = true;
7909 }
7910#endif
7911
7912
7913 /*
7914 * Read in the current memory content if it's a read, execute or partial
7915 * write access.
7916 */
7917 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7918 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
7919 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
7920
7921 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7922 {
7923 if (!pVCpu->iem.s.fBypassHandlers)
7924 {
7925 /*
7926 * Must carefully deal with access handler status codes here,
7927 * makes the code a bit bloated.
7928 */
7929 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
7930 if (rcStrict == VINF_SUCCESS)
7931 {
7932 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7933 if (rcStrict == VINF_SUCCESS)
7934 { /*likely */ }
7935 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7936 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7937 else
7938 {
7939 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
7940 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7941 return rcStrict;
7942 }
7943 }
7944 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7945 {
7946 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7947 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7948 {
7949 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7950 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7951 }
7952 else
7953 {
7954 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
 7955                     GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
7956 return rcStrict2;
7957 }
7958 }
7959 else
7960 {
7961 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7962 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7963 return rcStrict;
7964 }
7965 }
7966 else
7967 {
7968 /*
7969 * No informational status codes here, much more straight forward.
 7970             * No informational status codes here, much more straightforward.
7971 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
7972 if (RT_SUCCESS(rc))
7973 {
7974 Assert(rc == VINF_SUCCESS);
7975 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
7976 if (RT_SUCCESS(rc))
7977 Assert(rc == VINF_SUCCESS);
7978 else
7979 {
7980 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
7981 return rc;
7982 }
7983 }
7984 else
7985 {
7986 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
7987 return rc;
7988 }
7989 }
7990
7991#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7992 if ( !pVCpu->iem.s.fNoRem
7993 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
7994 {
7995 /*
7996 * Record the reads.
7997 */
7998 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7999 if (pEvtRec)
8000 {
8001 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8002 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8003 pEvtRec->u.RamRead.cb = cbFirstPage;
8004 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8005 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8006 }
8007 pEvtRec = iemVerifyAllocRecord(pVCpu);
8008 if (pEvtRec)
8009 {
8010 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8011 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8012 pEvtRec->u.RamRead.cb = cbSecondPage;
8013 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8014 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8015 }
8016 }
8017#endif
8018 }
8019#ifdef VBOX_STRICT
8020 else
8021 memset(pbBuf, 0xcc, cbMem);
8022 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8023 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8024#endif
8025
8026 /*
8027 * Commit the bounce buffer entry.
8028 */
8029 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8030 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8031 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8032 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8033 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8034 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8035 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8036 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8037 pVCpu->iem.s.cActiveMappings++;
8038
8039 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8040 *ppvMem = pbBuf;
8041 return VINF_SUCCESS;
8042}
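
/*
 * A worked example of the split above, with illustrative numbers only: for a
 * 4 KB page size, an 8 byte access whose physical address ends in 0xffa gives
 * cbFirstPage = 0x1000 - 0xffa = 6 and cbSecondPage = 8 - 6 = 2.  The first
 * six bytes live in ab[0..5] of the bounce buffer and are backed by the tail
 * of the page at GCPhysFirst, while the last two bytes live in ab[6..7] and
 * are backed by the start of the page at GCPhysSecond.
 */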
8043
8044
8045/**
 8046 * iemMemMap worker that deals with iemMemPageMap failures.
8047 */
8048IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8049 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8050{
8051 /*
8052 * Filter out conditions we can handle and the ones which shouldn't happen.
8053 */
8054 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8055 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8056 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8057 {
8058 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8059 return rcMap;
8060 }
8061 pVCpu->iem.s.cPotentialExits++;
8062
8063 /*
8064 * Read in the current memory content if it's a read, execute or partial
8065 * write access.
8066 */
8067 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8068 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8069 {
8070 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8071 memset(pbBuf, 0xff, cbMem);
8072 else
8073 {
8074 int rc;
8075 if (!pVCpu->iem.s.fBypassHandlers)
8076 {
8077 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8078 if (rcStrict == VINF_SUCCESS)
8079 { /* nothing */ }
8080 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8081 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8082 else
8083 {
8084 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8085 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8086 return rcStrict;
8087 }
8088 }
8089 else
8090 {
8091 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8092 if (RT_SUCCESS(rc))
8093 { /* likely */ }
8094 else
8095 {
8096 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8097 GCPhysFirst, rc));
8098 return rc;
8099 }
8100 }
8101 }
8102
8103#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8104 if ( !pVCpu->iem.s.fNoRem
8105 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8106 {
8107 /*
8108 * Record the read.
8109 */
8110 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8111 if (pEvtRec)
8112 {
8113 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8114 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8115 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8116 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8117 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8118 }
8119 }
8120#endif
8121 }
8122#ifdef VBOX_STRICT
8123 else
8124 memset(pbBuf, 0xcc, cbMem);
8125#endif
8126#ifdef VBOX_STRICT
8127 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8128 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8129#endif
8130
8131 /*
8132 * Commit the bounce buffer entry.
8133 */
8134 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8136 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8137 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8138 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8139 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8140 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8141 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8142 pVCpu->iem.s.cActiveMappings++;
8143
8144 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8145 *ppvMem = pbBuf;
8146 return VINF_SUCCESS;
8147}
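
/*
 * Summary of the rcMap values handled above (anything else asserts and is
 * passed straight back to the caller):
 *      - VERR_PGM_PHYS_TLB_CATCH_WRITE / VERR_PGM_PHYS_TLB_CATCH_ALL: an
 *        access handler covers the page, so reads go through PGMPhysRead (or
 *        PGMPhysSimpleReadGCPhys when bypassing handlers) and writes are done
 *        from the bounce buffer at commit time.
 *      - VERR_PGM_PHYS_TLB_UNASSIGNED: nothing is mapped at that address, so
 *        reads are satisfied with 0xff filler bytes and the entry is flagged
 *        fUnassigned.
 */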
8148
8149
8150
8151/**
8152 * Maps the specified guest memory for the given kind of access.
8153 *
8154 * This may be using bounce buffering of the memory if it's crossing a page
8155 * boundary or if there is an access handler installed for any of it. Because
8156 * of lock prefix guarantees, we're in for some extra clutter when this
8157 * happens.
8158 *
8159 * This may raise a \#GP, \#SS, \#PF or \#AC.
8160 *
8161 * @returns VBox strict status code.
8162 *
8163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8164 * @param ppvMem Where to return the pointer to the mapped
8165 * memory.
8166 * @param cbMem The number of bytes to map. This is usually 1,
8167 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8168 * string operations it can be up to a page.
8169 * @param iSegReg The index of the segment register to use for
8170 * this access. The base and limits are checked.
8171 * Use UINT8_MAX to indicate that no segmentation
8172 * is required (for IDT, GDT and LDT accesses).
8173 * @param GCPtrMem The address of the guest memory.
8174 * @param fAccess How the memory is being accessed. The
8175 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8176 * how to map the memory, while the
8177 * IEM_ACCESS_WHAT_XXX bit is used when raising
8178 * exceptions.
8179 */
8180IEM_STATIC VBOXSTRICTRC
8181iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8182{
8183 /*
8184 * Check the input and figure out which mapping entry to use.
8185 */
8186 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
 8187    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8188 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8189
8190 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8191 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8192 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8193 {
8194 iMemMap = iemMemMapFindFree(pVCpu);
8195 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8196 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8197 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8198 pVCpu->iem.s.aMemMappings[2].fAccess),
8199 VERR_IEM_IPE_9);
8200 }
8201
8202 /*
8203 * Map the memory, checking that we can actually access it. If something
8204 * slightly complicated happens, fall back on bounce buffering.
8205 */
8206 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8207 if (rcStrict != VINF_SUCCESS)
8208 return rcStrict;
8209
8210 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8211 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8212
8213 RTGCPHYS GCPhysFirst;
8214 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8215 if (rcStrict != VINF_SUCCESS)
8216 return rcStrict;
8217
8218 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8219 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8220 if (fAccess & IEM_ACCESS_TYPE_READ)
8221 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8222
8223 void *pvMem;
8224 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8225 if (rcStrict != VINF_SUCCESS)
8226 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8227
8228 /*
8229 * Fill in the mapping table entry.
8230 */
8231 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8232 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8233 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8234 pVCpu->iem.s.cActiveMappings++;
8235
8236 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8237 *ppvMem = pvMem;
8238 return VINF_SUCCESS;
8239}
8240
8241
8242/**
8243 * Commits the guest memory if bounce buffered and unmaps it.
8244 *
8245 * @returns Strict VBox status code.
8246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8247 * @param pvMem The mapping.
8248 * @param fAccess The kind of access.
8249 */
8250IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8251{
8252 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8253 AssertReturn(iMemMap >= 0, iMemMap);
8254
8255 /* If it's bounce buffered, we may need to write back the buffer. */
8256 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8257 {
8258 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8259 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8260 }
8261 /* Otherwise unlock it. */
8262 else
8263 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8264
8265 /* Free the entry. */
8266 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8267 Assert(pVCpu->iem.s.cActiveMappings != 0);
8268 pVCpu->iem.s.cActiveMappings--;
8269 return VINF_SUCCESS;
8270}
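
/*
 * The pair above is the basic access pattern used by all the data fetch and
 * store helpers below: map, poke the returned pointer, commit.  A minimal
 * sketch for a 16-bit write, assuming GCPtrMem and u16Value come from the
 * caller (error handling exactly as in the real helpers):
 *
 *      uint16_t *pu16Dst;
 *      VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                  X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          *pu16Dst = u16Value;
 *          rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      }
 *
 * A successful iemMemMap that cannot be committed must not be leaked; see
 * iemMemRollback further down for the cleanup used on aborted instructions.
 */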
8271
8272#ifdef IEM_WITH_SETJMP
8273
8274/**
8275 * Maps the specified guest memory for the given kind of access, longjmp on
8276 * error.
8277 *
8278 * This may be using bounce buffering of the memory if it's crossing a page
8279 * boundary or if there is an access handler installed for any of it. Because
8280 * of lock prefix guarantees, we're in for some extra clutter when this
8281 * happens.
8282 *
8283 * This may raise a \#GP, \#SS, \#PF or \#AC.
8284 *
8285 * @returns Pointer to the mapped memory.
8286 *
8287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8288 * @param cbMem The number of bytes to map. This is usually 1,
8289 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8290 * string operations it can be up to a page.
8291 * @param iSegReg The index of the segment register to use for
8292 * this access. The base and limits are checked.
8293 * Use UINT8_MAX to indicate that no segmentation
8294 * is required (for IDT, GDT and LDT accesses).
8295 * @param GCPtrMem The address of the guest memory.
8296 * @param fAccess How the memory is being accessed. The
8297 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8298 * how to map the memory, while the
8299 * IEM_ACCESS_WHAT_XXX bit is used when raising
8300 * exceptions.
8301 */
8302IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8303{
8304 /*
8305 * Check the input and figure out which mapping entry to use.
8306 */
8307 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
 8308    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8309 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8310
8311 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8312 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8313 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8314 {
8315 iMemMap = iemMemMapFindFree(pVCpu);
8316 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8317 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8318 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8319 pVCpu->iem.s.aMemMappings[2].fAccess),
8320 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8321 }
8322
8323 /*
8324 * Map the memory, checking that we can actually access it. If something
8325 * slightly complicated happens, fall back on bounce buffering.
8326 */
8327 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8328 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8329 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8330
8331 /* Crossing a page boundary? */
8332 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8333 { /* No (likely). */ }
8334 else
8335 {
8336 void *pvMem;
8337 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8338 if (rcStrict == VINF_SUCCESS)
8339 return pvMem;
8340 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8341 }
8342
8343 RTGCPHYS GCPhysFirst;
8344 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8345 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8346 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8347
8348 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8349 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8350 if (fAccess & IEM_ACCESS_TYPE_READ)
8351 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8352
8353 void *pvMem;
8354 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8355 if (rcStrict == VINF_SUCCESS)
8356 { /* likely */ }
8357 else
8358 {
8359 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8360 if (rcStrict == VINF_SUCCESS)
8361 return pvMem;
8362 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8363 }
8364
8365 /*
8366 * Fill in the mapping table entry.
8367 */
8368 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8369 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8370 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8371 pVCpu->iem.s.cActiveMappings++;
8372
8373 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8374 return pvMem;
8375}
8376
8377
8378/**
8379 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8380 *
8381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8382 * @param pvMem The mapping.
8383 * @param fAccess The kind of access.
8384 */
8385IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8386{
8387 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8388 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8389
8390 /* If it's bounce buffered, we may need to write back the buffer. */
8391 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8392 {
8393 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8394 {
8395 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8396 if (rcStrict == VINF_SUCCESS)
8397 return;
8398 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8399 }
8400 }
8401 /* Otherwise unlock it. */
8402 else
8403 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8404
8405 /* Free the entry. */
8406 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8407 Assert(pVCpu->iem.s.cActiveMappings != 0);
8408 pVCpu->iem.s.cActiveMappings--;
8409}
8410
8411#endif
8412
8413#ifndef IN_RING3
8414/**
 8415 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 8416 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
8417 *
8418 * Allows the instruction to be completed and retired, while the IEM user will
8419 * return to ring-3 immediately afterwards and do the postponed writes there.
8420 *
8421 * @returns VBox status code (no strict statuses). Caller must check
8422 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8424 * @param pvMem The mapping.
8425 * @param fAccess The kind of access.
8426 */
8427IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8428{
8429 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8430 AssertReturn(iMemMap >= 0, iMemMap);
8431
8432 /* If it's bounce buffered, we may need to write back the buffer. */
8433 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8434 {
8435 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8436 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8437 }
8438 /* Otherwise unlock it. */
8439 else
8440 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8441
8442 /* Free the entry. */
8443 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8444 Assert(pVCpu->iem.s.cActiveMappings != 0);
8445 pVCpu->iem.s.cActiveMappings--;
8446 return VINF_SUCCESS;
8447}
8448#endif
8449
8450
8451/**
 8452 * Rolls back mappings, releasing page locks and such.
8453 *
8454 * The caller shall only call this after checking cActiveMappings.
8455 *
8457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8458 */
8459IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8460{
8461 Assert(pVCpu->iem.s.cActiveMappings > 0);
8462
8463 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8464 while (iMemMap-- > 0)
8465 {
8466 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8467 if (fAccess != IEM_ACCESS_INVALID)
8468 {
8469 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8470 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8471 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8472 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8473 Assert(pVCpu->iem.s.cActiveMappings > 0);
8474 pVCpu->iem.s.cActiveMappings--;
8475 }
8476 }
8477}
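
/*
 * A minimal sketch of how the rollback is meant to be used on an aborted
 * instruction, assuming rcStrict holds the failure status (the actual call
 * sites are not shown here):
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 *
 * Mappings that were already committed are marked IEM_ACCESS_INVALID and are
 * skipped by the loop above, so only the leftovers get unlocked.
 */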
8478
8479
8480/**
8481 * Fetches a data byte.
8482 *
8483 * @returns Strict VBox status code.
8484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8485 * @param pu8Dst Where to return the byte.
8486 * @param iSegReg The index of the segment register to use for
8487 * this access. The base and limits are checked.
8488 * @param GCPtrMem The address of the guest memory.
8489 */
8490IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8491{
8492 /* The lazy approach for now... */
8493 uint8_t const *pu8Src;
8494 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8495 if (rc == VINF_SUCCESS)
8496 {
8497 *pu8Dst = *pu8Src;
8498 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8499 }
8500 return rc;
8501}
8502
8503
8504#ifdef IEM_WITH_SETJMP
8505/**
8506 * Fetches a data byte, longjmp on error.
8507 *
8508 * @returns The byte.
8509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8510 * @param iSegReg The index of the segment register to use for
8511 * this access. The base and limits are checked.
8512 * @param GCPtrMem The address of the guest memory.
8513 */
8514DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8515{
8516 /* The lazy approach for now... */
8517 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8518 uint8_t const bRet = *pu8Src;
8519 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8520 return bRet;
8521}
8522#endif /* IEM_WITH_SETJMP */
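
/*
 * The Jmp flavour above (and its siblings below) backs the setjmp based
 * decoder path: instead of returning a strict status code that every caller
 * must propagate, a failure longjmps to pVCpu->iem.s.CTX_SUFF(pJmpBuf) and
 * the fetched value is returned directly.  Roughly, the same byte fetch in
 * the two styles (illustrative only, GCPtrMem supplied by the caller):
 *
 *      uint8_t bTmp;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &bTmp, X86_SREG_DS, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * versus, when IEM_WITH_SETJMP is defined:
 *
 *      uint8_t const bTmp = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 */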
8523
8524
8525/**
8526 * Fetches a data word.
8527 *
8528 * @returns Strict VBox status code.
8529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8530 * @param pu16Dst Where to return the word.
8531 * @param iSegReg The index of the segment register to use for
8532 * this access. The base and limits are checked.
8533 * @param GCPtrMem The address of the guest memory.
8534 */
8535IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8536{
8537 /* The lazy approach for now... */
8538 uint16_t const *pu16Src;
8539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8540 if (rc == VINF_SUCCESS)
8541 {
8542 *pu16Dst = *pu16Src;
8543 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8544 }
8545 return rc;
8546}
8547
8548
8549#ifdef IEM_WITH_SETJMP
8550/**
8551 * Fetches a data word, longjmp on error.
8552 *
8553 * @returns The word
8554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8555 * @param iSegReg The index of the segment register to use for
8556 * this access. The base and limits are checked.
8557 * @param GCPtrMem The address of the guest memory.
8558 */
8559DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8560{
8561 /* The lazy approach for now... */
8562 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8563 uint16_t const u16Ret = *pu16Src;
8564 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8565 return u16Ret;
8566}
8567#endif
8568
8569
8570/**
8571 * Fetches a data dword.
8572 *
8573 * @returns Strict VBox status code.
8574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8575 * @param pu32Dst Where to return the dword.
8576 * @param iSegReg The index of the segment register to use for
8577 * this access. The base and limits are checked.
8578 * @param GCPtrMem The address of the guest memory.
8579 */
8580IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8581{
8582 /* The lazy approach for now... */
8583 uint32_t const *pu32Src;
8584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8585 if (rc == VINF_SUCCESS)
8586 {
8587 *pu32Dst = *pu32Src;
8588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8589 }
8590 return rc;
8591}
8592
8593
8594#ifdef IEM_WITH_SETJMP
8595
8596IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8597{
8598 Assert(cbMem >= 1);
8599 Assert(iSegReg < X86_SREG_COUNT);
8600
8601 /*
8602 * 64-bit mode is simpler.
8603 */
8604 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8605 {
8606 if (iSegReg >= X86_SREG_FS)
8607 {
8608 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8609 GCPtrMem += pSel->u64Base;
8610 }
8611
8612 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8613 return GCPtrMem;
8614 }
8615 /*
8616 * 16-bit and 32-bit segmentation.
8617 */
8618 else
8619 {
8620 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8621 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8622 == X86DESCATTR_P /* data, expand up */
8623 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8624 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8625 {
8626 /* expand up */
 8627         uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
 8628         if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
 8629                       && GCPtrLast32 >= (uint32_t)GCPtrMem))
8630 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8631 }
8632 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8633 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8634 {
8635 /* expand down */
8636 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8637 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8638 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8639 && GCPtrLast32 > (uint32_t)GCPtrMem))
8640 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8641 }
8642 else
8643 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8644 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8645 }
8646 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8647}
8648
8649
8650IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8651{
8652 Assert(cbMem >= 1);
8653 Assert(iSegReg < X86_SREG_COUNT);
8654
8655 /*
8656 * 64-bit mode is simpler.
8657 */
8658 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8659 {
8660 if (iSegReg >= X86_SREG_FS)
8661 {
8662 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8663 GCPtrMem += pSel->u64Base;
8664 }
8665
8666 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8667 return GCPtrMem;
8668 }
8669 /*
8670 * 16-bit and 32-bit segmentation.
8671 */
8672 else
8673 {
8674 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8675 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8676 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8677 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8678 {
8679 /* expand up */
 8680         uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
 8681         if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
 8682                       && GCPtrLast32 >= (uint32_t)GCPtrMem))
8683 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8684 }
 8685      else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8686 {
8687 /* expand down */
8688 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8689 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8690 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8691 && GCPtrLast32 > (uint32_t)GCPtrMem))
8692 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8693 }
8694 else
8695 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8696 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8697 }
8698 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8699}
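
/*
 * Worked examples for the legacy mode limit checks above, using illustrative
 * values and stating the architectural rule the code implements:
 *      - Expand-up data segment, u32Limit = 0x0000ffff: a 4 byte access at
 *        offset 0xfffc still fits (its last byte is 0xffff, within the
 *        limit), whereas the same access at 0xfffd reaches 0x10000 and
 *        raises the selector bounds exception.
 *      - Expand-down data segment, u32Limit = 0x00000fff, D/B clear (upper
 *        bound 0xffff): valid offsets are 0x1000 through 0xffff, so a 4 byte
 *        access at 0x1000 is fine, one at 0x0ffe is at or below the limit
 *        and rejected, and one at 0xfffe runs past the 64 KB upper bound and
 *        is rejected as well.
 */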
8700
8701
8702/**
8703 * Fetches a data dword, longjmp on error, fallback/safe version.
8704 *
8705 * @returns The dword
8706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8707 * @param iSegReg The index of the segment register to use for
8708 * this access. The base and limits are checked.
8709 * @param GCPtrMem The address of the guest memory.
8710 */
8711IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8712{
8713 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8714 uint32_t const u32Ret = *pu32Src;
8715 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8716 return u32Ret;
8717}
8718
8719
8720/**
8721 * Fetches a data dword, longjmp on error.
8722 *
8723 * @returns The dword
8724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8725 * @param iSegReg The index of the segment register to use for
8726 * this access. The base and limits are checked.
8727 * @param GCPtrMem The address of the guest memory.
8728 */
8729DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8730{
8731# ifdef IEM_WITH_DATA_TLB
8732 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8733 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8734 {
8735 /// @todo more later.
8736 }
8737
8738 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8739# else
8740 /* The lazy approach. */
8741 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8742 uint32_t const u32Ret = *pu32Src;
8743 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8744 return u32Ret;
8745# endif
8746}
8747#endif
8748
8749
8750#ifdef SOME_UNUSED_FUNCTION
8751/**
8752 * Fetches a data dword and sign extends it to a qword.
8753 *
8754 * @returns Strict VBox status code.
8755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8756 * @param pu64Dst Where to return the sign extended value.
8757 * @param iSegReg The index of the segment register to use for
8758 * this access. The base and limits are checked.
8759 * @param GCPtrMem The address of the guest memory.
8760 */
8761IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8762{
8763 /* The lazy approach for now... */
8764 int32_t const *pi32Src;
8765 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8766 if (rc == VINF_SUCCESS)
8767 {
8768 *pu64Dst = *pi32Src;
8769 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8770 }
8771#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8772 else
8773 *pu64Dst = 0;
8774#endif
8775 return rc;
8776}
8777#endif
8778
8779
8780/**
8781 * Fetches a data qword.
8782 *
8783 * @returns Strict VBox status code.
8784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8785 * @param pu64Dst Where to return the qword.
8786 * @param iSegReg The index of the segment register to use for
8787 * this access. The base and limits are checked.
8788 * @param GCPtrMem The address of the guest memory.
8789 */
8790IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8791{
8792 /* The lazy approach for now... */
8793 uint64_t const *pu64Src;
8794 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8795 if (rc == VINF_SUCCESS)
8796 {
8797 *pu64Dst = *pu64Src;
8798 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8799 }
8800 return rc;
8801}
8802
8803
8804#ifdef IEM_WITH_SETJMP
8805/**
8806 * Fetches a data qword, longjmp on error.
8807 *
8808 * @returns The qword.
8809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8810 * @param iSegReg The index of the segment register to use for
8811 * this access. The base and limits are checked.
8812 * @param GCPtrMem The address of the guest memory.
8813 */
8814DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8815{
8816 /* The lazy approach for now... */
8817 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8818 uint64_t const u64Ret = *pu64Src;
8819 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8820 return u64Ret;
8821}
8822#endif
8823
8824
8825/**
 8826 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8827 *
8828 * @returns Strict VBox status code.
8829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8830 * @param pu64Dst Where to return the qword.
8831 * @param iSegReg The index of the segment register to use for
8832 * this access. The base and limits are checked.
8833 * @param GCPtrMem The address of the guest memory.
8834 */
8835IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8836{
8837 /* The lazy approach for now... */
8838 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8839 if (RT_UNLIKELY(GCPtrMem & 15))
8840 return iemRaiseGeneralProtectionFault0(pVCpu);
8841
8842 uint64_t const *pu64Src;
8843 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8844 if (rc == VINF_SUCCESS)
8845 {
8846 *pu64Dst = *pu64Src;
8847 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8848 }
8849 return rc;
8850}
8851
8852
8853#ifdef IEM_WITH_SETJMP
8854/**
 8855 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8856 *
8857 * @returns The qword.
8858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8859 * @param iSegReg The index of the segment register to use for
8860 * this access. The base and limits are checked.
8861 * @param GCPtrMem The address of the guest memory.
8862 */
8863DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8864{
8865 /* The lazy approach for now... */
8866 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8867 if (RT_LIKELY(!(GCPtrMem & 15)))
8868 {
8869 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8870 uint64_t const u64Ret = *pu64Src;
8871 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8872 return u64Ret;
8873 }
8874
8875 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
8876 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
8877}
8878#endif
8879
8880
8881/**
8882 * Fetches a data tword.
8883 *
8884 * @returns Strict VBox status code.
8885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8886 * @param pr80Dst Where to return the tword.
8887 * @param iSegReg The index of the segment register to use for
8888 * this access. The base and limits are checked.
8889 * @param GCPtrMem The address of the guest memory.
8890 */
8891IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8892{
8893 /* The lazy approach for now... */
8894 PCRTFLOAT80U pr80Src;
8895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8896 if (rc == VINF_SUCCESS)
8897 {
8898 *pr80Dst = *pr80Src;
8899 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8900 }
8901 return rc;
8902}
8903
8904
8905#ifdef IEM_WITH_SETJMP
8906/**
8907 * Fetches a data tword, longjmp on error.
8908 *
8909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8910 * @param pr80Dst Where to return the tword.
8911 * @param iSegReg The index of the segment register to use for
8912 * this access. The base and limits are checked.
8913 * @param GCPtrMem The address of the guest memory.
8914 */
8915DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8916{
8917 /* The lazy approach for now... */
8918 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8919 *pr80Dst = *pr80Src;
8920 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8921}
8922#endif
8923
8924
8925/**
8926 * Fetches a data dqword (double qword), generally SSE related.
8927 *
8928 * @returns Strict VBox status code.
8929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 8930 * @param pu128Dst Where to return the dqword.
8931 * @param iSegReg The index of the segment register to use for
8932 * this access. The base and limits are checked.
8933 * @param GCPtrMem The address of the guest memory.
8934 */
8935IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8936{
8937 /* The lazy approach for now... */
8938 uint128_t const *pu128Src;
8939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8940 if (rc == VINF_SUCCESS)
8941 {
8942 *pu128Dst = *pu128Src;
8943 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8944 }
8945 return rc;
8946}
8947
8948
8949#ifdef IEM_WITH_SETJMP
8950/**
 8951 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
8952 *
8953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 8954 * @param pu128Dst Where to return the dqword.
8955 * @param iSegReg The index of the segment register to use for
8956 * this access. The base and limits are checked.
8957 * @param GCPtrMem The address of the guest memory.
8958 */
8959IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8960{
8961 /* The lazy approach for now... */
8962 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8963 *pu128Dst = *pu128Src;
8964 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8965}
8966#endif
8967
8968
8969/**
8970 * Fetches a data dqword (double qword) at an aligned address, generally SSE
8971 * related.
8972 *
8973 * Raises \#GP(0) if not aligned.
8974 *
8975 * @returns Strict VBox status code.
8976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 8977 * @param pu128Dst Where to return the dqword.
8978 * @param iSegReg The index of the segment register to use for
8979 * this access. The base and limits are checked.
8980 * @param GCPtrMem The address of the guest memory.
8981 */
8982IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8983{
8984 /* The lazy approach for now... */
8985 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8986 if ( (GCPtrMem & 15)
8987 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
8988 return iemRaiseGeneralProtectionFault0(pVCpu);
8989
8990 uint128_t const *pu128Src;
8991 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8992 if (rc == VINF_SUCCESS)
8993 {
8994 *pu128Dst = *pu128Src;
8995 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8996 }
8997 return rc;
8998}
8999
9000
9001#ifdef IEM_WITH_SETJMP
9002/**
9003 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9004 * related, longjmp on error.
9005 *
9006 * Raises \#GP(0) if not aligned.
9007 *
9008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9009 * @param pu128Dst Where to return the dqword.
9010 * @param iSegReg The index of the segment register to use for
9011 * this access. The base and limits are checked.
9012 * @param GCPtrMem The address of the guest memory.
9013 */
9014DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9015{
9016 /* The lazy approach for now... */
9017 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9018 if ( (GCPtrMem & 15) == 0
9019 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9020 {
9021 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9022 IEM_ACCESS_DATA_R);
9023 *pu128Dst = *pu128Src;
9024 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9025 return;
9026 }
9027
9028 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9029 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9030}
9031#endif
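
/*
 * Alignment rule shared by the AlignedSse fetchers above and the matching
 * stores further down: a 16 byte access must be 16 byte aligned, so e.g.
 * GCPtrMem = 0x1230 is accepted while 0x1238 raises \#GP(0), unless the
 * MXCSR.MM (misaligned exception mask) bit is set, in which case the check
 * is skipped.  See the todo notes above regarding whether real hardware
 * applies the check before or after adding the segment base.
 */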
9032
9033
9034
9035/**
9036 * Fetches a descriptor register (lgdt, lidt).
9037 *
9038 * @returns Strict VBox status code.
9039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9040 * @param pcbLimit Where to return the limit.
9041 * @param pGCPtrBase Where to return the base.
9042 * @param iSegReg The index of the segment register to use for
9043 * this access. The base and limits are checked.
9044 * @param GCPtrMem The address of the guest memory.
9045 * @param enmOpSize The effective operand size.
9046 */
9047IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9048 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9049{
9050 /*
9051 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9052 * little special:
9053 * - The two reads are done separately.
 9054    *     - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9055 * - We suspect the 386 to actually commit the limit before the base in
9056 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 9057    *       don't try to emulate this eccentric behavior, because it's not well
9058 * enough understood and rather hard to trigger.
9059 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9060 */
9061 VBOXSTRICTRC rcStrict;
9062 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9063 {
9064 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9065 if (rcStrict == VINF_SUCCESS)
9066 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9067 }
9068 else
9069 {
9070 uint32_t uTmp;
9071 if (enmOpSize == IEMMODE_32BIT)
9072 {
9073 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9074 {
9075 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9076 if (rcStrict == VINF_SUCCESS)
9077 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9078 }
9079 else
9080 {
9081 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9082 if (rcStrict == VINF_SUCCESS)
9083 {
9084 *pcbLimit = (uint16_t)uTmp;
9085 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9086 }
9087 }
9088 if (rcStrict == VINF_SUCCESS)
9089 *pGCPtrBase = uTmp;
9090 }
9091 else
9092 {
9093 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9094 if (rcStrict == VINF_SUCCESS)
9095 {
9096 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9097 if (rcStrict == VINF_SUCCESS)
9098 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9099 }
9100 }
9101 }
9102 return rcStrict;
9103}
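
/*
 * Layout of the pseudo-descriptor operand read above (offsets relative to
 * GCPtrMem):
 *      +0  16-bit limit
 *      +2  base address - 32 bits outside long mode, 64 bits in long mode
 * With a 16-bit operand size only the low 24 bits of the base are kept (the
 * UINT32_C(0x00ffffff) mask above); the top byte of the 6 byte operand is
 * ignored.
 */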
9104
9105
9106
9107/**
9108 * Stores a data byte.
9109 *
9110 * @returns Strict VBox status code.
9111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9112 * @param iSegReg The index of the segment register to use for
9113 * this access. The base and limits are checked.
9114 * @param GCPtrMem The address of the guest memory.
9115 * @param u8Value The value to store.
9116 */
9117IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9118{
9119 /* The lazy approach for now... */
9120 uint8_t *pu8Dst;
9121 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9122 if (rc == VINF_SUCCESS)
9123 {
9124 *pu8Dst = u8Value;
9125 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9126 }
9127 return rc;
9128}
9129
9130
9131#ifdef IEM_WITH_SETJMP
9132/**
9133 * Stores a data byte, longjmp on error.
9134 *
9135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9136 * @param iSegReg The index of the segment register to use for
9137 * this access. The base and limits are checked.
9138 * @param GCPtrMem The address of the guest memory.
9139 * @param u8Value The value to store.
9140 */
9141IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9142{
9143 /* The lazy approach for now... */
9144 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9145 *pu8Dst = u8Value;
9146 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9147}
9148#endif
9149
9150
9151/**
9152 * Stores a data word.
9153 *
9154 * @returns Strict VBox status code.
9155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9156 * @param iSegReg The index of the segment register to use for
9157 * this access. The base and limits are checked.
9158 * @param GCPtrMem The address of the guest memory.
9159 * @param u16Value The value to store.
9160 */
9161IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9162{
9163 /* The lazy approach for now... */
9164 uint16_t *pu16Dst;
9165 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9166 if (rc == VINF_SUCCESS)
9167 {
9168 *pu16Dst = u16Value;
9169 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9170 }
9171 return rc;
9172}
9173
9174
9175#ifdef IEM_WITH_SETJMP
9176/**
9177 * Stores a data word, longjmp on error.
9178 *
9179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9180 * @param iSegReg The index of the segment register to use for
9181 * this access. The base and limits are checked.
9182 * @param GCPtrMem The address of the guest memory.
9183 * @param u16Value The value to store.
9184 */
9185IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9186{
9187 /* The lazy approach for now... */
9188 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9189 *pu16Dst = u16Value;
9190 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9191}
9192#endif
9193
9194
9195/**
9196 * Stores a data dword.
9197 *
9198 * @returns Strict VBox status code.
9199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9200 * @param iSegReg The index of the segment register to use for
9201 * this access. The base and limits are checked.
9202 * @param GCPtrMem The address of the guest memory.
9203 * @param u32Value The value to store.
9204 */
9205IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9206{
9207 /* The lazy approach for now... */
9208 uint32_t *pu32Dst;
9209 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9210 if (rc == VINF_SUCCESS)
9211 {
9212 *pu32Dst = u32Value;
9213 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9214 }
9215 return rc;
9216}
9217
9218
9219#ifdef IEM_WITH_SETJMP
9220/**
 9221 * Stores a data dword, longjmp on error.
 9222 *
9224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9225 * @param iSegReg The index of the segment register to use for
9226 * this access. The base and limits are checked.
9227 * @param GCPtrMem The address of the guest memory.
9228 * @param u32Value The value to store.
9229 */
9230IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9231{
9232 /* The lazy approach for now... */
9233 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9234 *pu32Dst = u32Value;
9235 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9236}
9237#endif
9238
9239
9240/**
9241 * Stores a data qword.
9242 *
9243 * @returns Strict VBox status code.
9244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9245 * @param iSegReg The index of the segment register to use for
9246 * this access. The base and limits are checked.
9247 * @param GCPtrMem The address of the guest memory.
9248 * @param u64Value The value to store.
9249 */
9250IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9251{
9252 /* The lazy approach for now... */
9253 uint64_t *pu64Dst;
9254 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9255 if (rc == VINF_SUCCESS)
9256 {
9257 *pu64Dst = u64Value;
9258 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9259 }
9260 return rc;
9261}
9262
9263
9264#ifdef IEM_WITH_SETJMP
9265/**
9266 * Stores a data qword, longjmp on error.
9267 *
9268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9269 * @param iSegReg The index of the segment register to use for
9270 * this access. The base and limits are checked.
9271 * @param GCPtrMem The address of the guest memory.
9272 * @param u64Value The value to store.
9273 */
9274IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9275{
9276 /* The lazy approach for now... */
9277 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9278 *pu64Dst = u64Value;
9279 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9280}
9281#endif
9282
9283
9284/**
9285 * Stores a data dqword.
9286 *
9287 * @returns Strict VBox status code.
9288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9289 * @param iSegReg The index of the segment register to use for
9290 * this access. The base and limits are checked.
9291 * @param GCPtrMem The address of the guest memory.
9292 * @param u128Value The value to store.
9293 */
9294IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9295{
9296 /* The lazy approach for now... */
9297 uint128_t *pu128Dst;
9298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9299 if (rc == VINF_SUCCESS)
9300 {
9301 *pu128Dst = u128Value;
9302 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9303 }
9304 return rc;
9305}
9306
9307
9308#ifdef IEM_WITH_SETJMP
9309/**
9310 * Stores a data dqword, longjmp on error.
9311 *
9312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9313 * @param iSegReg The index of the segment register to use for
9314 * this access. The base and limits are checked.
9315 * @param GCPtrMem The address of the guest memory.
9316 * @param u128Value The value to store.
9317 */
9318IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9319{
9320 /* The lazy approach for now... */
9321 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9322 *pu128Dst = u128Value;
9323 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9324}
9325#endif
9326
9327
9328/**
9329 * Stores a data dqword, SSE aligned.
9330 *
9331 * @returns Strict VBox status code.
9332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9333 * @param iSegReg The index of the segment register to use for
9334 * this access. The base and limits are checked.
9335 * @param GCPtrMem The address of the guest memory.
9336 * @param u128Value The value to store.
9337 */
9338IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9339{
9340 /* The lazy approach for now... */
9341 if ( (GCPtrMem & 15)
9342 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9343 return iemRaiseGeneralProtectionFault0(pVCpu);
9344
9345 uint128_t *pu128Dst;
9346 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9347 if (rc == VINF_SUCCESS)
9348 {
9349 *pu128Dst = u128Value;
9350 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9351 }
9352 return rc;
9353}
9354
9355
9356#ifdef IEM_WITH_SETJMP
9357/**
 9358 * Stores a data dqword, SSE aligned, longjmp on error.
 9359 *
9361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9362 * @param iSegReg The index of the segment register to use for
9363 * this access. The base and limits are checked.
9364 * @param GCPtrMem The address of the guest memory.
9365 * @param u128Value The value to store.
9366 */
9367DECL_NO_INLINE(IEM_STATIC, void)
9368iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9369{
9370 /* The lazy approach for now... */
9371 if ( (GCPtrMem & 15) == 0
9372 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9373 {
9374 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9375 *pu128Dst = u128Value;
9376 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9377 return;
9378 }
9379
9380 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9381 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9382}
9383#endif
9384
9385
9386/**
9387 * Stores a descriptor register (sgdt, sidt).
9388 *
9389 * @returns Strict VBox status code.
9390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9391 * @param cbLimit The limit.
9392 * @param GCPtrBase The base address.
9393 * @param iSegReg The index of the segment register to use for
9394 * this access. The base and limits are checked.
9395 * @param GCPtrMem The address of the guest memory.
9396 */
9397IEM_STATIC VBOXSTRICTRC
9398iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9399{
9400 /*
 9401     * The SIDT and SGDT instructions actually store the data using two
 9402     * independent writes.  The instructions do not respond to opsize prefixes.
9403 */
9404 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9405 if (rcStrict == VINF_SUCCESS)
9406 {
9407 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9408 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9409 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9410 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9411 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9412 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9413 else
9414 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9415 }
9416 return rcStrict;
9417}
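
/*
 * Illustrative sketch (not from the original sources): an SGDT-style caller
 * simply passes the GDTR values through and lets the helper above do the two
 * writes (16-bit limit at GCPtrMem, 32/64-bit base at GCPtrMem + 2).  The
 * iEffSeg and GCPtrEffDst names are hypothetical locals for the decoded
 * operand:
 *
 *      PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt,
 *                                                  iEffSeg, GCPtrEffDst);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */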
9418
9419
9420/**
9421 * Pushes a word onto the stack.
9422 *
9423 * @returns Strict VBox status code.
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param u16Value The value to push.
9426 */
9427IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9428{
9429 /* Decrement the stack pointer. */
9430 uint64_t uNewRsp;
9431 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9432 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9433
9434 /* Write the word the lazy way. */
9435 uint16_t *pu16Dst;
9436 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9437 if (rc == VINF_SUCCESS)
9438 {
9439 *pu16Dst = u16Value;
9440 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9441 }
9442
9443 /* Commit the new RSP value unless an access handler made trouble. */
9444 if (rc == VINF_SUCCESS)
9445 pCtx->rsp = uNewRsp;
9446
9447 return rc;
9448}
9449
9450
9451/**
9452 * Pushes a dword onto the stack.
9453 *
9454 * @returns Strict VBox status code.
9455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9456 * @param u32Value The value to push.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9459{
9460 /* Decrement the stack pointer. */
9461 uint64_t uNewRsp;
9462 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9463 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9464
9465 /* Write the dword the lazy way. */
9466 uint32_t *pu32Dst;
9467 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9468 if (rc == VINF_SUCCESS)
9469 {
9470 *pu32Dst = u32Value;
9471 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9472 }
9473
9474 /* Commit the new RSP value unless an access handler made trouble. */
9475 if (rc == VINF_SUCCESS)
9476 pCtx->rsp = uNewRsp;
9477
9478 return rc;
9479}
9480
9481
9482/**
9483 * Pushes a dword segment register value onto the stack.
9484 *
9485 * @returns Strict VBox status code.
9486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9487 * @param u32Value The value to push.
9488 */
9489IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9490{
9491 /* Decrement the stack pointer. */
9492 uint64_t uNewRsp;
9493 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9494 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9495
9496 VBOXSTRICTRC rc;
9497 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9498 {
9499 /* The recompiler writes a full dword. */
9500 uint32_t *pu32Dst;
9501 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9502 if (rc == VINF_SUCCESS)
9503 {
9504 *pu32Dst = u32Value;
9505 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9506 }
9507 }
9508 else
9509 {
9510 /* The Intel docs talk about zero extending the selector register
9511 value. My actual Intel CPU here might be zero extending the value,
9512 but it still only writes the lower word... */
9513 /** @todo Test this on new HW, on AMD, and in 64-bit mode. Also test what
9514 * happens when crossing a page boundary: is the high word checked
9515 * for write accessibility or not? Probably it is. What about segment limits?
9516 * It appears this behavior is also shared with trap error codes.
9517 *
9518 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
9519 * on ancient hardware to find out when it actually changed. */
9520 uint16_t *pu16Dst;
9521 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9522 if (rc == VINF_SUCCESS)
9523 {
9524 *pu16Dst = (uint16_t)u32Value;
9525 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9526 }
9527 }
9528
9529 /* Commit the new RSP value unless an access handler made trouble. */
9530 if (rc == VINF_SUCCESS)
9531 pCtx->rsp = uNewRsp;
9532
9533 return rc;
9534}
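
/*
 * Illustrative example (not from the original sources) of the partial write
 * described above, using made-up values: with a 32-bit operand size, "push fs"
 * reserves a 4-byte stack slot but only the low word is written, so
 *
 *      // before:  FS = 0x0030, dword at the new ESP = 0xdeadbeef
 *      // PUSH FS (32-bit operand size, non-verification path)
 *      // after:   dword at the new ESP = 0xdead0030  (high word left untouched)
 *
 * which matches the 16-bit read-modify-write mapping done in the else branch.
 */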
9535
9536
9537/**
9538 * Pushes a qword onto the stack.
9539 *
9540 * @returns Strict VBox status code.
9541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9542 * @param u64Value The value to push.
9543 */
9544IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9545{
9546 /* Decrement the stack pointer. */
9547 uint64_t uNewRsp;
9548 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9549 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9550
9551 /* Write the qword the lazy way. */
9552 uint64_t *pu64Dst;
9553 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9554 if (rc == VINF_SUCCESS)
9555 {
9556 *pu64Dst = u64Value;
9557 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9558 }
9559
9560 /* Commit the new RSP value unless an access handler made trouble. */
9561 if (rc == VINF_SUCCESS)
9562 pCtx->rsp = uNewRsp;
9563
9564 return rc;
9565}
9566
9567
9568/**
9569 * Pops a word from the stack.
9570 *
9571 * @returns Strict VBox status code.
9572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9573 * @param pu16Value Where to store the popped value.
9574 */
9575IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9576{
9577 /* Increment the stack pointer. */
9578 uint64_t uNewRsp;
9579 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9580 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9581
9582 /* Read the word the lazy way. */
9583 uint16_t const *pu16Src;
9584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9585 if (rc == VINF_SUCCESS)
9586 {
9587 *pu16Value = *pu16Src;
9588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9589
9590 /* Commit the new RSP value. */
9591 if (rc == VINF_SUCCESS)
9592 pCtx->rsp = uNewRsp;
9593 }
9594
9595 return rc;
9596}
9597
9598
9599/**
9600 * Pops a dword from the stack.
9601 *
9602 * @returns Strict VBox status code.
9603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9604 * @param pu32Value Where to store the popped value.
9605 */
9606IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9607{
9608 /* Increment the stack pointer. */
9609 uint64_t uNewRsp;
9610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9611 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9612
9613 /* Read the dword the lazy way. */
9614 uint32_t const *pu32Src;
9615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9616 if (rc == VINF_SUCCESS)
9617 {
9618 *pu32Value = *pu32Src;
9619 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9620
9621 /* Commit the new RSP value. */
9622 if (rc == VINF_SUCCESS)
9623 pCtx->rsp = uNewRsp;
9624 }
9625
9626 return rc;
9627}
9628
9629
9630/**
9631 * Pops a qword from the stack.
9632 *
9633 * @returns Strict VBox status code.
9634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9635 * @param pu64Value Where to store the popped value.
9636 */
9637IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9638{
9639 /* Increment the stack pointer. */
9640 uint64_t uNewRsp;
9641 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9642 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9643
9645 /* Read the qword the lazy way. */
9645 uint64_t const *pu64Src;
9646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9647 if (rc == VINF_SUCCESS)
9648 {
9649 *pu64Value = *pu64Src;
9650 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9651
9652 /* Commit the new RSP value. */
9653 if (rc == VINF_SUCCESS)
9654 pCtx->rsp = uNewRsp;
9655 }
9656
9657 return rc;
9658}
9659
9660
9661/**
9662 * Pushes a word onto the stack, using a temporary stack pointer.
9663 *
9664 * @returns Strict VBox status code.
9665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9666 * @param u16Value The value to push.
9667 * @param pTmpRsp Pointer to the temporary stack pointer.
9668 */
9669IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9670{
9671 /* Decrement the stack pointer. */
9672 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9673 RTUINT64U NewRsp = *pTmpRsp;
9674 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9675
9676 /* Write the word the lazy way. */
9677 uint16_t *pu16Dst;
9678 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9679 if (rc == VINF_SUCCESS)
9680 {
9681 *pu16Dst = u16Value;
9682 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9683 }
9684
9685 /* Commit the new RSP value unless an access handler made trouble. */
9686 if (rc == VINF_SUCCESS)
9687 *pTmpRsp = NewRsp;
9688
9689 return rc;
9690}
9691
9692
9693/**
9694 * Pushes a dword onto the stack, using a temporary stack pointer.
9695 *
9696 * @returns Strict VBox status code.
9697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9698 * @param u32Value The value to push.
9699 * @param pTmpRsp Pointer to the temporary stack pointer.
9700 */
9701IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9702{
9703 /* Decrement the stack pointer. */
9704 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9705 RTUINT64U NewRsp = *pTmpRsp;
9706 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9707
9708 /* Write the dword the lazy way. */
9709 uint32_t *pu32Dst;
9710 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9711 if (rc == VINF_SUCCESS)
9712 {
9713 *pu32Dst = u32Value;
9714 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9715 }
9716
9717 /* Commit the new RSP value unless an access handler made trouble. */
9718 if (rc == VINF_SUCCESS)
9719 *pTmpRsp = NewRsp;
9720
9721 return rc;
9722}
9723
9724
9725/**
9726 * Pushes a qword onto the stack, using a temporary stack pointer.
9727 *
9728 * @returns Strict VBox status code.
9729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9730 * @param u64Value The value to push.
9731 * @param pTmpRsp Pointer to the temporary stack pointer.
9732 */
9733IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9734{
9735 /* Decrement the stack pointer. */
9736 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9737 RTUINT64U NewRsp = *pTmpRsp;
9738 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9739
9740 /* Write the qword the lazy way. */
9741 uint64_t *pu64Dst;
9742 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9743 if (rc == VINF_SUCCESS)
9744 {
9745 *pu64Dst = u64Value;
9746 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9747 }
9748
9749 /* Commit the new RSP value unless an access handler made trouble. */
9750 if (rc == VINF_SUCCESS)
9751 *pTmpRsp = NewRsp;
9752
9753 return rc;
9754}
9755
9756
9757/**
9758 * Pops a word from the stack, using a temporary stack pointer.
9759 *
9760 * @returns Strict VBox status code.
9761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9762 * @param pu16Value Where to store the popped value.
9763 * @param pTmpRsp Pointer to the temporary stack pointer.
9764 */
9765IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9766{
9767 /* Increment the stack pointer. */
9768 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9769 RTUINT64U NewRsp = *pTmpRsp;
9770 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9771
9772 /* Read the word the lazy way. */
9773 uint16_t const *pu16Src;
9774 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9775 if (rc == VINF_SUCCESS)
9776 {
9777 *pu16Value = *pu16Src;
9778 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9779
9780 /* Commit the new RSP value. */
9781 if (rc == VINF_SUCCESS)
9782 *pTmpRsp = NewRsp;
9783 }
9784
9785 return rc;
9786}
9787
9788
9789/**
9790 * Pops a dword from the stack, using a temporary stack pointer.
9791 *
9792 * @returns Strict VBox status code.
9793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9794 * @param pu32Value Where to store the popped value.
9795 * @param pTmpRsp Pointer to the temporary stack pointer.
9796 */
9797IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9798{
9799 /* Increment the stack pointer. */
9800 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9801 RTUINT64U NewRsp = *pTmpRsp;
9802 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9803
9804 /* Read the dword the lazy way. */
9805 uint32_t const *pu32Src;
9806 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9807 if (rc == VINF_SUCCESS)
9808 {
9809 *pu32Value = *pu32Src;
9810 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9811
9812 /* Commit the new RSP value. */
9813 if (rc == VINF_SUCCESS)
9814 *pTmpRsp = NewRsp;
9815 }
9816
9817 return rc;
9818}
9819
9820
9821/**
9822 * Pops a qword from the stack, using a temporary stack pointer.
9823 *
9824 * @returns Strict VBox status code.
9825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9826 * @param pu64Value Where to store the popped value.
9827 * @param pTmpRsp Pointer to the temporary stack pointer.
9828 */
9829IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9830{
9831 /* Increment the stack pointer. */
9832 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9833 RTUINT64U NewRsp = *pTmpRsp;
9834 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9835
9836 /* Read the qword the lazy way. */
9837 uint64_t const *pu64Src;
9838 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9839 if (rcStrict == VINF_SUCCESS)
9840 {
9841 *pu64Value = *pu64Src;
9842 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9843
9844 /* Commit the new RSP value. */
9845 if (rcStrict == VINF_SUCCESS)
9846 *pTmpRsp = NewRsp;
9847 }
9848
9849 return rcStrict;
9850}
9851
9852
9853/**
9854 * Begin a special stack push (used by interrupt, exceptions and such).
9855 *
9856 * This will raise \#SS or \#PF if appropriate.
9857 *
9858 * @returns Strict VBox status code.
9859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9860 * @param cbMem The number of bytes to push onto the stack.
9861 * @param ppvMem Where to return the pointer to the stack memory.
9862 * As with the other memory functions this could be
9863 * direct access or bounce buffered access, so
9864 * don't commit registers until the commit call
9865 * succeeds.
9866 * @param puNewRsp Where to return the new RSP value. This must be
9867 * passed unchanged to
9868 * iemMemStackPushCommitSpecial().
9869 */
9870IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
9871{
9872 Assert(cbMem < UINT8_MAX);
9873 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9874 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9875 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9876}
9877
9878
9879/**
9880 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
9881 *
9882 * This will update the rSP.
9883 *
9884 * @returns Strict VBox status code.
9885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9886 * @param pvMem The pointer returned by
9887 * iemMemStackPushBeginSpecial().
9888 * @param uNewRsp The new RSP value returned by
9889 * iemMemStackPushBeginSpecial().
9890 */
9891IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
9892{
9893 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
9894 if (rcStrict == VINF_SUCCESS)
9895 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9896 return rcStrict;
9897}
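
/*
 * Illustrative sketch (not from the original sources) of the begin/commit
 * protocol above, roughly as an exception/interrupt dispatcher would use it
 * to build a 3-dword frame.  The frame contents and local names are made up
 * for the example:
 *
 *      uint32_t    *pau32Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                          (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = uRetEip;   // lowest address = new stack top
 *      pau32Frame[1] = uRetCs;
 *      pau32Frame[2] = fEfl;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;       // RSP is only updated by the commit call
 */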
9898
9899
9900/**
9901 * Begin a special stack pop (used by iret, retf and such).
9902 *
9903 * This will raise \#SS or \#PF if appropriate.
9904 *
9905 * @returns Strict VBox status code.
9906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9907 * @param cbMem The number of bytes to pop off the stack.
9908 * @param ppvMem Where to return the pointer to the stack memory.
9909 * @param puNewRsp Where to return the new RSP value. This must be
9910 * passed unchanged to
9911 * iemMemStackPopCommitSpecial() or applied
9912 * manually if iemMemStackPopDoneSpecial() is used.
9913 */
9914IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9915{
9916 Assert(cbMem < UINT8_MAX);
9917 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9918 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9919 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9920}
9921
9922
9923/**
9924 * Continue a special stack pop (used by iret and retf).
9925 *
9926 * This will raise \#SS or \#PF if appropriate.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param cbMem The number of bytes to pop off the stack.
9931 * @param ppvMem Where to return the pointer to the stack memory.
9932 * @param puNewRsp Where to return the new RSP value. This must be
9933 * passed unchanged to
9934 * iemMemStackPopCommitSpecial() or applied
9935 * manually if iemMemStackPopDoneSpecial() is used.
9936 */
9937IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9938{
9939 Assert(cbMem < UINT8_MAX);
9940 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9941 RTUINT64U NewRsp;
9942 NewRsp.u = *puNewRsp;
9943 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9944 *puNewRsp = NewRsp.u;
9945 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9946}
9947
9948
9949/**
9950 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
9951 *
9952 * This will update the rSP.
9953 *
9954 * @returns Strict VBox status code.
9955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9956 * @param pvMem The pointer returned by
9957 * iemMemStackPopBeginSpecial().
9958 * @param uNewRsp The new RSP value returned by
9959 * iemMemStackPopBeginSpecial().
9960 */
9961IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PVMCPU pVCpu, void const *pvMem, uint64_t uNewRsp)
9962{
9963 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9964 if (rcStrict == VINF_SUCCESS)
9965 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9966 return rcStrict;
9967}
9968
9969
9970/**
9971 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
9972 * iemMemStackPopContinueSpecial).
9973 *
9974 * The caller will manually commit the rSP.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param pvMem The pointer returned by
9979 * iemMemStackPopBeginSpecial() or
9980 * iemMemStackPopContinueSpecial().
9981 */
9982IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
9983{
9984 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9985}
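
/*
 * Illustrative sketch (not from the original sources) of how the special pop
 * helpers above combine, roughly in the way an iret/retf implementation would
 * use them: map the frame, copy out what is needed, unmap, validate, and only
 * then commit the new RSP manually.  The 3-dword frame is just an example:
 *
 *      uint32_t const *pau32Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                            (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uNewEip = pau32Frame[0];
 *      uint32_t const uNewCs  = pau32Frame[1];
 *      uint32_t const fEfl    = pau32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... validate uNewCs & friends before manually storing uNewRsp into pCtx->rsp ...
 */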
9986
9987
9988/**
9989 * Fetches a system table byte.
9990 *
9991 * @returns Strict VBox status code.
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param pbDst Where to return the byte.
9994 * @param iSegReg The index of the segment register to use for
9995 * this access. The base and limits are checked.
9996 * @param GCPtrMem The address of the guest memory.
9997 */
9998IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9999{
10000 /* The lazy approach for now... */
10001 uint8_t const *pbSrc;
10002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10003 if (rc == VINF_SUCCESS)
10004 {
10005 *pbDst = *pbSrc;
10006 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10007 }
10008 return rc;
10009}
10010
10011
10012/**
10013 * Fetches a system table word.
10014 *
10015 * @returns Strict VBox status code.
10016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10017 * @param pu16Dst Where to return the word.
10018 * @param iSegReg The index of the segment register to use for
10019 * this access. The base and limits are checked.
10020 * @param GCPtrMem The address of the guest memory.
10021 */
10022IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10023{
10024 /* The lazy approach for now... */
10025 uint16_t const *pu16Src;
10026 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10027 if (rc == VINF_SUCCESS)
10028 {
10029 *pu16Dst = *pu16Src;
10030 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10031 }
10032 return rc;
10033}
10034
10035
10036/**
10037 * Fetches a system table dword.
10038 *
10039 * @returns Strict VBox status code.
10040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10041 * @param pu32Dst Where to return the dword.
10042 * @param iSegReg The index of the segment register to use for
10043 * this access. The base and limits are checked.
10044 * @param GCPtrMem The address of the guest memory.
10045 */
10046IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10047{
10048 /* The lazy approach for now... */
10049 uint32_t const *pu32Src;
10050 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10051 if (rc == VINF_SUCCESS)
10052 {
10053 *pu32Dst = *pu32Src;
10054 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10055 }
10056 return rc;
10057}
10058
10059
10060/**
10061 * Fetches a system table qword.
10062 *
10063 * @returns Strict VBox status code.
10064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10065 * @param pu64Dst Where to return the qword.
10066 * @param iSegReg The index of the segment register to use for
10067 * this access. The base and limits are checked.
10068 * @param GCPtrMem The address of the guest memory.
10069 */
10070IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10071{
10072 /* The lazy approach for now... */
10073 uint64_t const *pu64Src;
10074 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10075 if (rc == VINF_SUCCESS)
10076 {
10077 *pu64Dst = *pu64Src;
10078 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10079 }
10080 return rc;
10081}
10082
10083
10084/**
10085 * Fetches a descriptor table entry with caller specified error code.
10086 *
10087 * @returns Strict VBox status code.
10088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10089 * @param pDesc Where to return the descriptor table entry.
10090 * @param uSel The selector which table entry to fetch.
10091 * @param uXcpt The exception to raise on table lookup error.
10092 * @param uErrorCode The error code associated with the exception.
10093 */
10094IEM_STATIC VBOXSTRICTRC
10095iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10096{
10097 AssertPtr(pDesc);
10098 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10099
10100 /** @todo did the 286 require all 8 bytes to be accessible? */
10101 /*
10102 * Get the selector table base and check bounds.
10103 */
10104 RTGCPTR GCPtrBase;
10105 if (uSel & X86_SEL_LDT)
10106 {
10107 if ( !pCtx->ldtr.Attr.n.u1Present
10108 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10109 {
10110 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10111 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10112 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10113 uErrorCode, 0);
10114 }
10115
10116 Assert(pCtx->ldtr.Attr.n.u1Present);
10117 GCPtrBase = pCtx->ldtr.u64Base;
10118 }
10119 else
10120 {
10121 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10122 {
10123 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10124 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10125 uErrorCode, 0);
10126 }
10127 GCPtrBase = pCtx->gdtr.pGdt;
10128 }
10129
10130 /*
10131 * Read the legacy descriptor and maybe the long mode extensions if
10132 * required.
10133 */
10134 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10135 if (rcStrict == VINF_SUCCESS)
10136 {
10137 if ( !IEM_IS_LONG_MODE(pVCpu)
10138 || pDesc->Legacy.Gen.u1DescType)
10139 pDesc->Long.au64[1] = 0;
10140 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10141 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10142 else
10143 {
10144 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10145 /** @todo is this the right exception? */
10146 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10147 }
10148 }
10149 return rcStrict;
10150}
10151
10152
10153/**
10154 * Fetches a descriptor table entry.
10155 *
10156 * @returns Strict VBox status code.
10157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10158 * @param pDesc Where to return the descriptor table entry.
10159 * @param uSel The selector which table entry to fetch.
10160 * @param uXcpt The exception to raise on table lookup error.
10161 */
10162IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10163{
10164 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10165}
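
/*
 * Illustrative sketch (not from the original sources): a typical caller
 * fetches the descriptor for a selector and then inspects the legacy bits;
 * uNewCs is a hypothetical selector value and the fault handling is
 * simplified (real callers pick the exact exception and error code):
 *
 *      IEMSELDESC   DescCs;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (   !DescCs.Legacy.Gen.u1DescType   // must be a code/data descriptor
 *          || !DescCs.Legacy.Gen.u1Present)
 *          return iemRaiseGeneralProtectionFault0(pVCpu);
 */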
10166
10167
10168/**
10169 * Fakes a long mode stack selector for SS = 0.
10170 *
10171 * @param pDescSs Where to return the fake stack descriptor.
10172 * @param uDpl The DPL we want.
10173 */
10174IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10175{
10176 pDescSs->Long.au64[0] = 0;
10177 pDescSs->Long.au64[1] = 0;
10178 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10179 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10180 pDescSs->Long.Gen.u2Dpl = uDpl;
10181 pDescSs->Long.Gen.u1Present = 1;
10182 pDescSs->Long.Gen.u1Long = 1;
10183}
10184
10185
10186/**
10187 * Marks the selector descriptor as accessed (only non-system descriptors).
10188 *
10189 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10190 * will therefore skip the limit checks.
10191 *
10192 * @returns Strict VBox status code.
10193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10194 * @param uSel The selector.
10195 */
10196IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10197{
10198 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10199
10200 /*
10201 * Get the selector table base and calculate the entry address.
10202 */
10203 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10204 ? pCtx->ldtr.u64Base
10205 : pCtx->gdtr.pGdt;
10206 GCPtr += uSel & X86_SEL_MASK;
10207
10208 /*
10209 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10210 * ugly stuff to avoid this. This also makes sure the access is atomic and
10211 * more or less removes any question about 8-bit vs. 32-bit accesses.
10212 */
10213 VBOXSTRICTRC rcStrict;
10214 uint32_t volatile *pu32;
10215 if ((GCPtr & 3) == 0)
10216 {
10217 /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
10218 GCPtr += 2 + 2;
10219 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10220 if (rcStrict != VINF_SUCCESS)
10221 return rcStrict;
10222 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10223 }
10224 else
10225 {
10226 /* The misaligned GDT/LDT case, map the whole thing. */
10227 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10228 if (rcStrict != VINF_SUCCESS)
10229 return rcStrict;
10230 switch ((uintptr_t)pu32 & 3)
10231 {
10232 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10233 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10234 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10235 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10236 }
10237 }
10238
10239 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10240}
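
/*
 * Worked example (illustrative) for the function above: the accessed bit is
 * bit 40 of the 8-byte descriptor.  In the aligned case the dword at offset
 * 2 + 2 is mapped, leaving that bit at 40 - 32 = 8, which is what gets passed
 * to ASMAtomicBitSet.  In the misaligned case the host pointer is advanced by
 * 4 - ((uintptr_t)pu32 & 3) bytes so that it becomes 32-bit aligned again,
 * and the bit index is reduced by 8 for every byte skipped, which is exactly
 * what the switch cases encode.
 */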
10241
10242/** @} */
10243
10244
10245/*
10246 * Include the C/C++ implementation of instruction.
10247 */
10248#include "IEMAllCImpl.cpp.h"
10249
10250
10251
10252/** @name "Microcode" macros.
10253 *
10254 * The idea is that we should be able to use the same code to interpret
10255 * instructions as well as recompiler instructions. Thus this obfuscation.
10256 *
10257 * @{
10258 */
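
/*
 * Illustrative sketch (not from the original sources) of how these macros are
 * strung together by the instruction decoders.  A register-only 32-bit binary
 * operation might look roughly like this (iGRegDst/iGRegSrc come from ModR/M
 * decoding and pfnWorkerU32 is a hypothetical assembly worker; the call macro
 * is defined further below):
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst,  0);
 *      IEM_MC_ARG(uint32_t,   u32Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *      IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnWorkerU32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */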
10259#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10260#define IEM_MC_END() }
10261#define IEM_MC_PAUSE() do {} while (0)
10262#define IEM_MC_CONTINUE() do {} while (0)
10263
10264/** Internal macro. */
10265#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10266 do \
10267 { \
10268 VBOXSTRICTRC rcStrict2 = a_Expr; \
10269 if (rcStrict2 != VINF_SUCCESS) \
10270 return rcStrict2; \
10271 } while (0)
10272
10273
10274#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10275#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10276#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10277#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10278#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10279#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10280#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10281#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10282#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10283 do { \
10284 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10285 return iemRaiseDeviceNotAvailable(pVCpu); \
10286 } while (0)
10287#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10288 do { \
10289 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10290 return iemRaiseMathFault(pVCpu); \
10291 } while (0)
10292#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10293 do { \
10294 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10295 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10296 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10297 return iemRaiseUndefinedOpcode(pVCpu); \
10298 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10299 return iemRaiseDeviceNotAvailable(pVCpu); \
10300 } while (0)
10301#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10302 do { \
10303 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10304 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10305 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10306 return iemRaiseUndefinedOpcode(pVCpu); \
10307 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10308 return iemRaiseDeviceNotAvailable(pVCpu); \
10309 } while (0)
10310#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10311 do { \
10312 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10313 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10314 return iemRaiseUndefinedOpcode(pVCpu); \
10315 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10316 return iemRaiseDeviceNotAvailable(pVCpu); \
10317 } while (0)
10318#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10319 do { \
10320 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10321 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10322 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10323 return iemRaiseUndefinedOpcode(pVCpu); \
10324 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10325 return iemRaiseDeviceNotAvailable(pVCpu); \
10326 } while (0)
10327#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10328 do { \
10329 if (pVCpu->iem.s.uCpl != 0) \
10330 return iemRaiseGeneralProtectionFault0(pVCpu); \
10331 } while (0)
10332
10333
10334#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10335#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10336#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10337#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10338#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10339#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10340#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10341 uint32_t a_Name; \
10342 uint32_t *a_pName = &a_Name
10343#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10344 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10345
10346#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10347#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10348
10349#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10350#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10351#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10352#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10353#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10354#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10355#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10356#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10357#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10358#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10359#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10360#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10361#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10362#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10363#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10364#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10365#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10366#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10367#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10368#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10369#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10370#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10371#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10372#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10373#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10374#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10375#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10376#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10377#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10378/** @note Not for IOPL or IF testing or modification. */
10379#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10380#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10381#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10382#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10383
10384#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10385#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10386#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10387#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10388#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10389#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10390#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10391#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10392#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10393#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10394#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10395 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10396
10397#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10398#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10399/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10400 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10401#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10402#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10403/** @note Not for IOPL or IF testing or modification. */
10404#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10405
10406#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10407#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10408#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10409 do { \
10410 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10411 *pu32Reg += (a_u32Value); \
10412 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10413 } while (0)
10414#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10415
10416#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10417#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10418#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10419 do { \
10420 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10421 *pu32Reg -= (a_u32Value); \
10422 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10423 } while (0)
10424#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10425#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10426
10427#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10428#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10429#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10430#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10431#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10432#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10433#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10434
10435#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10436#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10437#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10438#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10439
10440#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10441#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10442#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10443
10444#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10445#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10446#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10447
10448#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10449#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10450#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10451
10452#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10453#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10454#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10455
10456#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10457
10458#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10459
10460#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10461#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10462#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10463 do { \
10464 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10465 *pu32Reg &= (a_u32Value); \
10466 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10467 } while (0)
10468#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10469
10470#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10471#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10472#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10473 do { \
10474 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10475 *pu32Reg |= (a_u32Value); \
10476 pu32Reg[1] = 0; /* implicitly clear the high half of the 64-bit register. */ \
10477 } while (0)
10478#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10479
10480
10481/** @note Not for IOPL or IF modification. */
10482#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10483/** @note Not for IOPL or IF modification. */
10484#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10485/** @note Not for IOPL or IF modification. */
10486#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10487
10488#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10489
10490
10491#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10492 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10493#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10494 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10495#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10496 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10497#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10498 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10499#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10500 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10501#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10502 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10503#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10504 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10505
10506#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10507 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10508#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10509 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10510#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10511 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10512#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10513 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10514#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10515 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10516#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10517 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10518 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10519 } while (0)
10520#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10521 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10522 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10523 } while (0)
10524#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10525 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10526#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10527 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10528#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10529 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10530#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10531 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10532 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10533
10534#ifndef IEM_WITH_SETJMP
10535# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10536 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10537# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10538 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10539# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10540 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10541#else
10542# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10543 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10544# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10545 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10546# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10547 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10548#endif
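
/*
 * Illustrative note (not from the original sources): both variants above are
 * used identically from the instruction bodies; only the error plumbing
 * differs.  Without setjmp the macro returns the strict status code from the
 * enclosing function on failure, while with IEM_WITH_SETJMP the fetch helper
 * longjmps back to the executor instead.  iEffSeg and GCPtrEffSrc stand in
 * for the decoded segment and effective address:
 *
 *      IEM_MC_LOCAL(uint8_t, u8Value);
 *      IEM_MC_FETCH_MEM_U8(u8Value, iEffSeg, GCPtrEffSrc);
 *      // u8Value is only valid here if the access neither failed nor longjmp'ed.
 */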
10549
10550#ifndef IEM_WITH_SETJMP
10551# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10552 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10553# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10555# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10557#else
10558# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10559 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10560# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10561 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10562# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10563 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10564#endif
10565
10566#ifndef IEM_WITH_SETJMP
10567# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10568 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10569# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10570 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10571# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10572 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10573#else
10574# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10575 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10576# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10577 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10578# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10579 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10580#endif
10581
10582#ifdef SOME_UNUSED_FUNCTION
10583# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10584 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10585#endif
10586
10587#ifndef IEM_WITH_SETJMP
10588# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10589 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10590# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10592# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10594# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10595 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10596#else
10597# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10598 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10599# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10600 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10601# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10602 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10603# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10604 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10605#endif
10606
10607#ifndef IEM_WITH_SETJMP
10608# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10609 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10610# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10612# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10613 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10614#else
10615# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10616 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10617# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10618 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10619# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10620 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10621#endif
10622
10623#ifndef IEM_WITH_SETJMP
10624# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10625 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10626# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10627 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10628#else
10629# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10630 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10631# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10632 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10633#endif
10634
10635
10636
10637#ifndef IEM_WITH_SETJMP
10638# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10639 do { \
10640 uint8_t u8Tmp; \
10641 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10642 (a_u16Dst) = u8Tmp; \
10643 } while (0)
10644# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10645 do { \
10646 uint8_t u8Tmp; \
10647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10648 (a_u32Dst) = u8Tmp; \
10649 } while (0)
10650# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10651 do { \
10652 uint8_t u8Tmp; \
10653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10654 (a_u64Dst) = u8Tmp; \
10655 } while (0)
10656# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10657 do { \
10658 uint16_t u16Tmp; \
10659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10660 (a_u32Dst) = u16Tmp; \
10661 } while (0)
10662# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10663 do { \
10664 uint16_t u16Tmp; \
10665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10666 (a_u64Dst) = u16Tmp; \
10667 } while (0)
10668# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10669 do { \
10670 uint32_t u32Tmp; \
10671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10672 (a_u64Dst) = u32Tmp; \
10673 } while (0)
10674#else /* IEM_WITH_SETJMP */
10675# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10676 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10677# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10678 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10679# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10680 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10681# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10682 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10683# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10684 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10685# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10686 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10687#endif /* IEM_WITH_SETJMP */
10688
10689#ifndef IEM_WITH_SETJMP
10690# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10691 do { \
10692 uint8_t u8Tmp; \
10693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10694 (a_u16Dst) = (int8_t)u8Tmp; \
10695 } while (0)
10696# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10697 do { \
10698 uint8_t u8Tmp; \
10699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10700 (a_u32Dst) = (int8_t)u8Tmp; \
10701 } while (0)
10702# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10703 do { \
10704 uint8_t u8Tmp; \
10705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10706 (a_u64Dst) = (int8_t)u8Tmp; \
10707 } while (0)
10708# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10709 do { \
10710 uint16_t u16Tmp; \
10711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10712 (a_u32Dst) = (int16_t)u16Tmp; \
10713 } while (0)
10714# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10715 do { \
10716 uint16_t u16Tmp; \
10717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10718 (a_u64Dst) = (int16_t)u16Tmp; \
10719 } while (0)
10720# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10721 do { \
10722 uint32_t u32Tmp; \
10723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10724 (a_u64Dst) = (int32_t)u32Tmp; \
10725 } while (0)
10726#else /* IEM_WITH_SETJMP */
10727# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10728 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10729# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10730 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10731# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10732 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10733# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10734 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10735# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10736 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10737# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10738 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10739#endif /* IEM_WITH_SETJMP */
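
/*
 * Illustrative sketch (example only, not compiled): how the zero-/sign-
 * extending fetch macros above are typically combined with
 * IEM_MC_CALC_RM_EFF_ADDR inside an IEM_MC_BEGIN/IEM_MC_END block.  The
 * handler name is hypothetical; only the memory form is shown.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_movzx_Gd_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* (the register form is omitted here) */
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif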
10740
10741#ifndef IEM_WITH_SETJMP
10742# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10743 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10744# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10745 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10746# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10747 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10748# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10749 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10750#else
10751# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10752 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10753# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10754 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10755# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10756 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10757# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10758 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10759#endif
10760
10761#ifndef IEM_WITH_SETJMP
10762# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10763 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10764# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10765 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10766# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10767 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10768# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10769 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10770#else
10771# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10772 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10773# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10774 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10775# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10776 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10777# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10778 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10779#endif
10780
10781#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10782#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10783#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10784#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10785#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10786#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10787#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10788 do { \
10789 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10790 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10791 } while (0)
10792
10793#ifndef IEM_WITH_SETJMP
10794# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10795 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10796# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10797 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10798#else
10799# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10800 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10801# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10802 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10803#endif
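
/*
 * Illustrative sketch (example only, not compiled): the 128-bit aligned
 * fetch/store variants above as they would appear in the memory-to-register
 * half of a MOVAPS-style handler.  This is a fragment; bRm comes from the
 * decoder and the surrounding IEM_MC block boilerplate is the usual one.
 */
#if 0 /* example only */
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint128_t, u128Tmp);
IEM_MC_LOCAL(RTGCPTR,   GCPtrEffSrc);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u128Tmp);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif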
10804
10805
10806#define IEM_MC_PUSH_U16(a_u16Value) \
10807 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10808#define IEM_MC_PUSH_U32(a_u32Value) \
10809 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10810#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10811 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10812#define IEM_MC_PUSH_U64(a_u64Value) \
10813 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10814
10815#define IEM_MC_POP_U16(a_pu16Value) \
10816 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10817#define IEM_MC_POP_U32(a_pu32Value) \
10818 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10819#define IEM_MC_POP_U64(a_pu64Value) \
10820 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10821
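/*
 * Illustrative sketch (example only, not compiled): the stack push helpers
 * above in a hypothetical PUSH r16 handler body; iReg would come from the
 * decoded opcode byte.
 */
#if 0 /* example only */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_FETCH_GREG_U16(u16Value, iReg);
IEM_MC_PUSH_U16(u16Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif
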
10822/** Maps guest memory for direct or bounce buffered access.
10823 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10824 * @remarks May return.
10825 */
10826#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10827 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10828
10829/** Maps guest memory for direct or bounce buffered access.
10830 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10831 * @remarks May return.
10832 */
10833#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10834 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10835
10836/** Commits the memory and unmaps the guest memory.
10837 * @remarks May return.
10838 */
10839#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10840 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10841
10842/** Commits the memory and unmaps the guest memory, unless the FPU status word
10843 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
10844 * would cause the FPU store not to take place.
10845 *
10846 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10847 * store, while \#P will not.
10848 *
10849 * @remarks May in theory return - for now.
10850 */
10851#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10852 do { \
10853 if ( !(a_u16FSW & X86_FSW_ES) \
10854 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10855 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10856 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10857 } while (0)
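
/*
 * Illustrative sketch (example only, not compiled): mapping a read-write
 * memory operand, handing it to an assembly worker and committing it again,
 * roughly as an "add [mem16], r16" style handler does.  The access flags and
 * worker follow the usual pattern; the fragment assumes bRm from the decoder.
 */
#if 0 /* example only */
IEM_MC_BEGIN(3, 2);
IEM_MC_ARG(uint16_t *,       pu16Dst,         0);
IEM_MC_ARG(uint16_t,         u16Src,          1);
IEM_MC_ARG_LOCAL_EFLAGS(     pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING();
IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif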
10858
10859/** Calculates the effective address from R/M. */
10860#ifndef IEM_WITH_SETJMP
10861# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10862 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10863#else
10864# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10865 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10866#endif
10867
10868#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10869#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10870#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10871#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10872#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10873#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10874#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10875
10876/**
10877 * Defers the rest of the instruction emulation to a C implementation routine
10878 * and returns, only taking the standard parameters.
10879 *
10880 * @param a_pfnCImpl The pointer to the C routine.
10881 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10882 */
10883#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10884
10885/**
10886 * Defers the rest of the instruction emulation to a C implementation routine and
10887 * returns, taking one argument in addition to the standard ones.
10888 *
10889 * @param a_pfnCImpl The pointer to the C routine.
10890 * @param a0 The argument.
10891 */
10892#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10893
10894/**
10895 * Defers the rest of the instruction emulation to a C implementation routine
10896 * and returns, taking two arguments in addition to the standard ones.
10897 *
10898 * @param a_pfnCImpl The pointer to the C routine.
10899 * @param a0 The first extra argument.
10900 * @param a1 The second extra argument.
10901 */
10902#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10903
10904/**
10905 * Defers the rest of the instruction emulation to a C implementation routine
10906 * and returns, taking three arguments in addition to the standard ones.
10907 *
10908 * @param a_pfnCImpl The pointer to the C routine.
10909 * @param a0 The first extra argument.
10910 * @param a1 The second extra argument.
10911 * @param a2 The third extra argument.
10912 */
10913#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
10914
10915/**
10916 * Defers the rest of the instruction emulation to a C implementation routine
10917 * and returns, taking four arguments in addition to the standard ones.
10918 *
10919 * @param a_pfnCImpl The pointer to the C routine.
10920 * @param a0 The first extra argument.
10921 * @param a1 The second extra argument.
10922 * @param a2 The third extra argument.
10923 * @param a3 The fourth extra argument.
10924 */
10925#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
10926
10927/**
10928 * Defers the rest of the instruction emulation to a C implementation routine
10929 * and returns, taking five arguments in addition to the standard ones.
10930 *
10931 * @param a_pfnCImpl The pointer to the C routine.
10932 * @param a0 The first extra argument.
10933 * @param a1 The second extra argument.
10934 * @param a2 The third extra argument.
10935 * @param a3 The fourth extra argument.
10936 * @param a4 The fifth extra argument.
10937 */
10938#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
10939
10940/**
10941 * Defers the entire instruction emulation to a C implementation routine and
10942 * returns, only taking the standard parameters.
10943 *
10944 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10945 *
10946 * @param a_pfnCImpl The pointer to the C routine.
10947 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10948 */
10949#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10950
10951/**
10952 * Defers the entire instruction emulation to a C implementation routine and
10953 * returns, taking one argument in addition to the standard ones.
10954 *
10955 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10956 *
10957 * @param a_pfnCImpl The pointer to the C routine.
10958 * @param a0 The argument.
10959 */
10960#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10961
10962/**
10963 * Defers the entire instruction emulation to a C implementation routine and
10964 * returns, taking two arguments in addition to the standard ones.
10965 *
10966 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10967 *
10968 * @param a_pfnCImpl The pointer to the C routine.
10969 * @param a0 The first extra argument.
10970 * @param a1 The second extra argument.
10971 */
10972#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10973
10974/**
10975 * Defers the entire instruction emulation to a C implementation routine and
10976 * returns, taking three arguments in addition to the standard ones.
10977 *
10978 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10979 *
10980 * @param a_pfnCImpl The pointer to the C routine.
10981 * @param a0 The first extra argument.
10982 * @param a1 The second extra argument.
10983 * @param a2 The third extra argument.
10984 */
10985#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
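
/*
 * Illustrative sketch (example only, not compiled): deferring a whole
 * instruction to a C implementation routine.  Trivial decoder bodies look
 * essentially like this; the handler name here is hypothetical.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_hlt)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
#endif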
10986
10987/**
10988 * Calls a FPU assembly implementation taking one visible argument.
10989 *
10990 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10991 * @param a0 The first extra argument.
10992 */
10993#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
10994 do { \
10995 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
10996 } while (0)
10997
10998/**
10999 * Calls a FPU assembly implementation taking two visible arguments.
11000 *
11001 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11002 * @param a0 The first extra argument.
11003 * @param a1 The second extra argument.
11004 */
11005#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11006 do { \
11007 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11008 } while (0)
11009
11010/**
11011 * Calls a FPU assembly implementation taking three visible arguments.
11012 *
11013 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11014 * @param a0 The first extra argument.
11015 * @param a1 The second extra argument.
11016 * @param a2 The third extra argument.
11017 */
11018#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11019 do { \
11020 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11021 } while (0)
11022
11023#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11024 do { \
11025 (a_FpuData).FSW = (a_FSW); \
11026 (a_FpuData).r80Result = *(a_pr80Value); \
11027 } while (0)
11028
11029/** Pushes FPU result onto the stack. */
11030#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11031 iemFpuPushResult(pVCpu, &a_FpuData)
11032/** Pushes FPU result onto the stack and sets the FPUDP. */
11033#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11034 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11035
11036/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11037#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11038 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11039
11040/** Stores FPU result in a stack register. */
11041#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11042 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11043/** Stores FPU result in a stack register and pops the stack. */
11044#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11045 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11046/** Stores FPU result in a stack register and sets the FPUDP. */
11047#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11048 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11049/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11050 * stack. */
11051#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11052 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
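
/*
 * Illustrative sketch (example only, not compiled): the typical flow for a
 * binary FPU instruction with a memory operand - fetch the operand, call the
 * assembly worker via IEM_MC_CALL_FPU_AIMPL_3, commit the result with
 * IEM_MC_STORE_FPU_RESULT_MEM_OP, and fall back to the stack-underflow
 * helper when ST0 is empty.  Fragment assumes bRm from the decoder.
 */
#if 0 /* example only */
IEM_MC_BEGIN(3, 3);
IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes,  0);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,          1);
IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2,   r32Val2, 2);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_FETCH_MEM_R32(r32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r32, pFpuRes, pr80Value1, pr32Val2);
    IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif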
11053
11054/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11055#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11056 iemFpuUpdateOpcodeAndIp(pVCpu)
11057/** Free a stack register (for FFREE and FFREEP). */
11058#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11059 iemFpuStackFree(pVCpu, a_iStReg)
11060/** Increment the FPU stack pointer. */
11061#define IEM_MC_FPU_STACK_INC_TOP() \
11062 iemFpuStackIncTop(pVCpu)
11063/** Decrement the FPU stack pointer. */
11064#define IEM_MC_FPU_STACK_DEC_TOP() \
11065 iemFpuStackDecTop(pVCpu)
11066
11067/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11068#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11069 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11070/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11071#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11072 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11073/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11074#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11075 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11076/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11077#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11078 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11079/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11080 * stack. */
11081#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11082 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11083/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11084#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11085 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11086
11087/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11088#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11089 iemFpuStackUnderflow(pVCpu, a_iStDst)
11090/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11091 * stack. */
11092#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11093 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11094/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11095 * FPUDS. */
11096#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11097 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11098/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11099 * FPUDS. Pops stack. */
11100#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11101 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11102/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11103 * stack twice. */
11104#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11105 iemFpuStackUnderflowThenPopPop(pVCpu)
11106/** Raises a FPU stack underflow exception for an instruction pushing a result
11107 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11108#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11109 iemFpuStackPushUnderflow(pVCpu)
11110/** Raises a FPU stack underflow exception for an instruction pushing a result
11111 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11112#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11113 iemFpuStackPushUnderflowTwo(pVCpu)
11114
11115/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11116 * FPUIP, FPUCS and FOP. */
11117#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11118 iemFpuStackPushOverflow(pVCpu)
11119/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11120 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11121#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11122 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11123/** Prepares for using the FPU state.
11124 * Ensures that we can use the host FPU in the current context (RC+R0).
11125 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11126#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11127/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11128#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11129/** Actualizes the guest FPU state so it can be accessed and modified. */
11130#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11131
11132/** Prepares for using the SSE state.
11133 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11134 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11135#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11136/** Actualizes the guest XMM0..15 register state for read-only access. */
11137#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11138/** Actualizes the guest XMM0..15 register state for read-write access. */
11139#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11140
11141/**
11142 * Calls a MMX assembly implementation taking two visible arguments.
11143 *
11144 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11145 * @param a0 The first extra argument.
11146 * @param a1 The second extra argument.
11147 */
11148#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11149 do { \
11150 IEM_MC_PREPARE_FPU_USAGE(); \
11151 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11152 } while (0)
11153
11154/**
11155 * Calls a MMX assembly implementation taking three visible arguments.
11156 *
11157 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11158 * @param a0 The first extra argument.
11159 * @param a1 The second extra argument.
11160 * @param a2 The third extra argument.
11161 */
11162#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11163 do { \
11164 IEM_MC_PREPARE_FPU_USAGE(); \
11165 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11166 } while (0)
11167
11168
11169/**
11170 * Calls a SSE assembly implementation taking two visible arguments.
11171 *
11172 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11173 * @param a0 The first extra argument.
11174 * @param a1 The second extra argument.
11175 */
11176#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11177 do { \
11178 IEM_MC_PREPARE_SSE_USAGE(); \
11179 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11180 } while (0)
11181
11182/**
11183 * Calls a SSE assembly implementation taking three visible arguments.
11184 *
11185 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11186 * @param a0 The first extra argument.
11187 * @param a1 The second extra argument.
11188 * @param a2 The third extra argument.
11189 */
11190#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11191 do { \
11192 IEM_MC_PREPARE_SSE_USAGE(); \
11193 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11194 } while (0)
11195
11196/** @note Not for IOPL or IF testing. */
11197#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11198/** @note Not for IOPL or IF testing. */
11199#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11200/** @note Not for IOPL or IF testing. */
11201#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11202/** @note Not for IOPL or IF testing. */
11203#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11204/** @note Not for IOPL or IF testing. */
11205#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11206 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11207 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11208/** @note Not for IOPL or IF testing. */
11209#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11210 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11211 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11212/** @note Not for IOPL or IF testing. */
11213#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11214 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11215 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11216 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11217/** @note Not for IOPL or IF testing. */
11218#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11219 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11220 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11221 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11222#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11223#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11224#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11225/** @note Not for IOPL or IF testing. */
11226#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11227 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11228 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11229/** @note Not for IOPL or IF testing. */
11230#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11231 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11232 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11233/** @note Not for IOPL or IF testing. */
11234#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11235 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11236 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11237/** @note Not for IOPL or IF testing. */
11238#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11239 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11240 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11241/** @note Not for IOPL or IF testing. */
11242#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11243 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11244 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11245/** @note Not for IOPL or IF testing. */
11246#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11247 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11248 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11249#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11250#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11251
11252#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11253 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11254#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11255 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11256#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11257 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11258#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11259 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11260#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11261 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11262#define IEM_MC_IF_FCW_IM() \
11263 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11264
11265#define IEM_MC_ELSE() } else {
11266#define IEM_MC_ENDIF() } do {} while (0)
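
/*
 * Illustrative sketch (example only, not compiled): the EFLAGS test macros
 * above open an if/else block that must be closed with IEM_MC_ENDIF(), as in
 * this hypothetical SETO-style register form.  Fragment assumes bRm from the
 * decoder.
 */
#if 0 /* example only */
IEM_MC_BEGIN(0, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF)
    IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1);
IEM_MC_ELSE()
    IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0);
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif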
11267
11268/** @} */
11269
11270
11271/** @name Opcode Debug Helpers.
11272 * @{
11273 */
11274#ifdef DEBUG
11275# define IEMOP_MNEMONIC(a_szMnemonic) \
11276 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11277 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions))
11278# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
11279 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11280 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions))
11281#else
11282# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
11283# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
11284#endif
11285
11286/** @} */
11287
11288
11289/** @name Opcode Helpers.
11290 * @{
11291 */
11292
11293#ifdef IN_RING3
11294# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11295 do { \
11296 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11297 else \
11298 { \
11299 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11300 return IEMOP_RAISE_INVALID_OPCODE(); \
11301 } \
11302 } while (0)
11303#else
11304# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11305 do { \
11306 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11307 else return IEMOP_RAISE_INVALID_OPCODE(); \
11308 } while (0)
11309#endif
11310
11311/** The instruction requires a 186 or later. */
11312#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11313# define IEMOP_HLP_MIN_186() do { } while (0)
11314#else
11315# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11316#endif
11317
11318/** The instruction requires a 286 or later. */
11319#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11320# define IEMOP_HLP_MIN_286() do { } while (0)
11321#else
11322# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11323#endif
11324
11325/** The instruction requires a 386 or later. */
11326#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11327# define IEMOP_HLP_MIN_386() do { } while (0)
11328#else
11329# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11330#endif
11331
11332/** The instruction requires a 386 or later if the given expression is true. */
11333#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11334# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11335#else
11336# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11337#endif
11338
11339/** The instruction requires a 486 or later. */
11340#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11341# define IEMOP_HLP_MIN_486() do { } while (0)
11342#else
11343# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11344#endif
11345
11346/** The instruction requires a Pentium (586) or later. */
11347#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
11348# define IEMOP_HLP_MIN_586() do { } while (0)
11349#else
11350# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
11351#endif
11352
11353/** The instruction requires a PentiumPro (686) or later. */
11354#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
11355# define IEMOP_HLP_MIN_686() do { } while (0)
11356#else
11357# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
11358#endif
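
/*
 * Illustrative sketch (example only, not compiled): a decoder for an
 * instruction introduced with the 486 enforces the CPU floor before doing
 * anything else.  The handler and worker names are hypothetical.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_486_instr)
{
    IEMOP_HLP_MIN_486();            /* raises invalid opcode when targeting a 386 or older */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example_worker); /* hypothetical worker */
}
#endif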
11359
11360
11361/** The instruction raises an \#UD in real and V8086 mode. */
11362#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11363 do \
11364 { \
11365 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11366 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11367 } while (0)
11368
11369/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11370 * 64-bit mode. */
11371#define IEMOP_HLP_NO_64BIT() \
11372 do \
11373 { \
11374 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11375 return IEMOP_RAISE_INVALID_OPCODE(); \
11376 } while (0)
11377
11378/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11379 * 64-bit mode. */
11380#define IEMOP_HLP_ONLY_64BIT() \
11381 do \
11382 { \
11383 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11384 return IEMOP_RAISE_INVALID_OPCODE(); \
11385 } while (0)
11386
11387/** The instruction defaults to 64-bit operand size in 64-bit mode. */
11388#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11389 do \
11390 { \
11391 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11392 iemRecalEffOpSize64Default(pVCpu); \
11393 } while (0)
11394
11395/** The instruction has 64-bit operand size in 64-bit mode. */
11396#define IEMOP_HLP_64BIT_OP_SIZE() \
11397 do \
11398 { \
11399 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11400 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11401 } while (0)
11402
11403/** Only a REX prefix immediately preceding the first opcode byte takes
11404 * effect. This macro helps ensure this, and it also logs bad guest code. */
11405#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11406 do \
11407 { \
11408 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11409 { \
11410 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11411 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11412 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11413 pVCpu->iem.s.uRexB = 0; \
11414 pVCpu->iem.s.uRexIndex = 0; \
11415 pVCpu->iem.s.uRexReg = 0; \
11416 iemRecalEffOpSize(pVCpu); \
11417 } \
11418 } while (0)
11419
11420/**
11421 * Done decoding.
11422 */
11423#define IEMOP_HLP_DONE_DECODING() \
11424 do \
11425 { \
11426 /*nothing for now, maybe later... */ \
11427 } while (0)
11428
11429/**
11430 * Done decoding, raise \#UD exception if lock prefix present.
11431 */
11432#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11433 do \
11434 { \
11435 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11436 { /* likely */ } \
11437 else \
11438 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11439 } while (0)
11440#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11441 do \
11442 { \
11443 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11444 { /* likely */ } \
11445 else \
11446 { \
11447 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11448 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11449 } \
11450 } while (0)
11451#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11452 do \
11453 { \
11454 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11455 { /* likely */ } \
11456 else \
11457 { \
11458 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11459 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11460 } \
11461 } while (0)
11462
11463/**
11464 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11465 * are present.
11466 */
11467#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11468 do \
11469 { \
11470 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11471 { /* likely */ } \
11472 else \
11473 return IEMOP_RAISE_INVALID_OPCODE(); \
11474 } while (0)
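
/*
 * Illustrative sketch (example only, not compiled): the usual decode-time
 * guard sequence - log the mnemonic, reject illegal prefix combinations, then
 * enter the microcode block.  The mnemonic string is an example.
 */
#if 0 /* example only */
IEMOP_MNEMONIC("example");
IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
IEM_MC_BEGIN(0, 0);
/* ... microcode statements ... */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif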
11475
11476
11477/**
11478 * Calculates the effective address of a ModR/M memory operand.
11479 *
11480 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11481 *
11482 * @return Strict VBox status code.
11483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11484 * @param bRm The ModRM byte.
11485 * @param cbImm The size of any immediate following the
11486 * effective address opcode bytes. Important for
11487 * RIP relative addressing.
11488 * @param pGCPtrEff Where to return the effective address.
11489 */
11490IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11491{
11492 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11493 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11494# define SET_SS_DEF() \
11495 do \
11496 { \
11497 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11498 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11499 } while (0)
11500
11501 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11502 {
11503/** @todo Check the effective address size crap! */
11504 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11505 {
11506 uint16_t u16EffAddr;
11507
11508 /* Handle the disp16 form with no registers first. */
11509 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11510 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11511 else
11512 {
11513 /* Get the displacement. */
11514 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11515 {
11516 case 0: u16EffAddr = 0; break;
11517 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11518 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11519 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11520 }
11521
11522 /* Add the base and index registers to the disp. */
11523 switch (bRm & X86_MODRM_RM_MASK)
11524 {
11525 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11526 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11527 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11528 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11529 case 4: u16EffAddr += pCtx->si; break;
11530 case 5: u16EffAddr += pCtx->di; break;
11531 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11532 case 7: u16EffAddr += pCtx->bx; break;
11533 }
11534 }
11535
11536 *pGCPtrEff = u16EffAddr;
11537 }
11538 else
11539 {
11540 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11541 uint32_t u32EffAddr;
11542
11543 /* Handle the disp32 form with no registers first. */
11544 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11545 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11546 else
11547 {
11548 /* Get the register (or SIB) value. */
11549 switch ((bRm & X86_MODRM_RM_MASK))
11550 {
11551 case 0: u32EffAddr = pCtx->eax; break;
11552 case 1: u32EffAddr = pCtx->ecx; break;
11553 case 2: u32EffAddr = pCtx->edx; break;
11554 case 3: u32EffAddr = pCtx->ebx; break;
11555 case 4: /* SIB */
11556 {
11557 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11558
11559 /* Get the index and scale it. */
11560 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11561 {
11562 case 0: u32EffAddr = pCtx->eax; break;
11563 case 1: u32EffAddr = pCtx->ecx; break;
11564 case 2: u32EffAddr = pCtx->edx; break;
11565 case 3: u32EffAddr = pCtx->ebx; break;
11566 case 4: u32EffAddr = 0; /*none */ break;
11567 case 5: u32EffAddr = pCtx->ebp; break;
11568 case 6: u32EffAddr = pCtx->esi; break;
11569 case 7: u32EffAddr = pCtx->edi; break;
11570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11571 }
11572 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11573
11574 /* add base */
11575 switch (bSib & X86_SIB_BASE_MASK)
11576 {
11577 case 0: u32EffAddr += pCtx->eax; break;
11578 case 1: u32EffAddr += pCtx->ecx; break;
11579 case 2: u32EffAddr += pCtx->edx; break;
11580 case 3: u32EffAddr += pCtx->ebx; break;
11581 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11582 case 5:
11583 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11584 {
11585 u32EffAddr += pCtx->ebp;
11586 SET_SS_DEF();
11587 }
11588 else
11589 {
11590 uint32_t u32Disp;
11591 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11592 u32EffAddr += u32Disp;
11593 }
11594 break;
11595 case 6: u32EffAddr += pCtx->esi; break;
11596 case 7: u32EffAddr += pCtx->edi; break;
11597 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11598 }
11599 break;
11600 }
11601 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11602 case 6: u32EffAddr = pCtx->esi; break;
11603 case 7: u32EffAddr = pCtx->edi; break;
11604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11605 }
11606
11607 /* Get and add the displacement. */
11608 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11609 {
11610 case 0:
11611 break;
11612 case 1:
11613 {
11614 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11615 u32EffAddr += i8Disp;
11616 break;
11617 }
11618 case 2:
11619 {
11620 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11621 u32EffAddr += u32Disp;
11622 break;
11623 }
11624 default:
11625 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11626 }
11627
11628 }
11629 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11630 *pGCPtrEff = u32EffAddr;
11631 else
11632 {
11633 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11634 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11635 }
11636 }
11637 }
11638 else
11639 {
11640 uint64_t u64EffAddr;
11641
11642 /* Handle the rip+disp32 form with no registers first. */
11643 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11644 {
11645 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11646 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11647 }
11648 else
11649 {
11650 /* Get the register (or SIB) value. */
11651 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11652 {
11653 case 0: u64EffAddr = pCtx->rax; break;
11654 case 1: u64EffAddr = pCtx->rcx; break;
11655 case 2: u64EffAddr = pCtx->rdx; break;
11656 case 3: u64EffAddr = pCtx->rbx; break;
11657 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11658 case 6: u64EffAddr = pCtx->rsi; break;
11659 case 7: u64EffAddr = pCtx->rdi; break;
11660 case 8: u64EffAddr = pCtx->r8; break;
11661 case 9: u64EffAddr = pCtx->r9; break;
11662 case 10: u64EffAddr = pCtx->r10; break;
11663 case 11: u64EffAddr = pCtx->r11; break;
11664 case 13: u64EffAddr = pCtx->r13; break;
11665 case 14: u64EffAddr = pCtx->r14; break;
11666 case 15: u64EffAddr = pCtx->r15; break;
11667 /* SIB */
11668 case 4:
11669 case 12:
11670 {
11671 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11672
11673 /* Get the index and scale it. */
11674 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11675 {
11676 case 0: u64EffAddr = pCtx->rax; break;
11677 case 1: u64EffAddr = pCtx->rcx; break;
11678 case 2: u64EffAddr = pCtx->rdx; break;
11679 case 3: u64EffAddr = pCtx->rbx; break;
11680 case 4: u64EffAddr = 0; /*none */ break;
11681 case 5: u64EffAddr = pCtx->rbp; break;
11682 case 6: u64EffAddr = pCtx->rsi; break;
11683 case 7: u64EffAddr = pCtx->rdi; break;
11684 case 8: u64EffAddr = pCtx->r8; break;
11685 case 9: u64EffAddr = pCtx->r9; break;
11686 case 10: u64EffAddr = pCtx->r10; break;
11687 case 11: u64EffAddr = pCtx->r11; break;
11688 case 12: u64EffAddr = pCtx->r12; break;
11689 case 13: u64EffAddr = pCtx->r13; break;
11690 case 14: u64EffAddr = pCtx->r14; break;
11691 case 15: u64EffAddr = pCtx->r15; break;
11692 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11693 }
11694 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11695
11696 /* add base */
11697 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11698 {
11699 case 0: u64EffAddr += pCtx->rax; break;
11700 case 1: u64EffAddr += pCtx->rcx; break;
11701 case 2: u64EffAddr += pCtx->rdx; break;
11702 case 3: u64EffAddr += pCtx->rbx; break;
11703 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11704 case 6: u64EffAddr += pCtx->rsi; break;
11705 case 7: u64EffAddr += pCtx->rdi; break;
11706 case 8: u64EffAddr += pCtx->r8; break;
11707 case 9: u64EffAddr += pCtx->r9; break;
11708 case 10: u64EffAddr += pCtx->r10; break;
11709 case 11: u64EffAddr += pCtx->r11; break;
11710 case 12: u64EffAddr += pCtx->r12; break;
11711 case 14: u64EffAddr += pCtx->r14; break;
11712 case 15: u64EffAddr += pCtx->r15; break;
11713 /* complicated encodings */
11714 case 5:
11715 case 13:
11716 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11717 {
11718 if (!pVCpu->iem.s.uRexB)
11719 {
11720 u64EffAddr += pCtx->rbp;
11721 SET_SS_DEF();
11722 }
11723 else
11724 u64EffAddr += pCtx->r13;
11725 }
11726 else
11727 {
11728 uint32_t u32Disp;
11729 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11730 u64EffAddr += (int32_t)u32Disp;
11731 }
11732 break;
11733 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11734 }
11735 break;
11736 }
11737 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11738 }
11739
11740 /* Get and add the displacement. */
11741 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11742 {
11743 case 0:
11744 break;
11745 case 1:
11746 {
11747 int8_t i8Disp;
11748 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11749 u64EffAddr += i8Disp;
11750 break;
11751 }
11752 case 2:
11753 {
11754 uint32_t u32Disp;
11755 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11756 u64EffAddr += (int32_t)u32Disp;
11757 break;
11758 }
11759 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11760 }
11761
11762 }
11763
11764 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11765 *pGCPtrEff = u64EffAddr;
11766 else
11767 {
11768 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11769 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11770 }
11771 }
11772
11773 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11774 return VINF_SUCCESS;
11775}
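
/*
 * Illustrative sketch (example only, not compiled): the 16-bit ModR/M case
 * above boils down to a small base/index table plus an optional displacement.
 * This standalone helper restates that logic for clarity only; it is not
 * used by IEM.
 */
#if 0 /* example only */
static uint16_t iemExampleCalc16BitEffAddr(uint8_t bRm, uint16_t bx, uint16_t bp,
                                           uint16_t si, uint16_t di, uint16_t u16Disp)
{
    /* mod=0 && rm=6 is the register-less disp16 form and is handled by the caller. */
    uint16_t u16EffAddr = u16Disp; /* disp0 / sign-extended disp8 / disp16, per the mod field */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += bx + si; break;
        case 1: u16EffAddr += bx + di; break;
        case 2: u16EffAddr += bp + si; break;  /* defaults to SS */
        case 3: u16EffAddr += bp + di; break;  /* defaults to SS */
        case 4: u16EffAddr += si;      break;
        case 5: u16EffAddr += di;      break;
        case 6: u16EffAddr += bp;      break;  /* defaults to SS */
        case 7: u16EffAddr += bx;      break;
    }
    return u16EffAddr; /* 16-bit wrap-around is implicit in the uint16_t arithmetic */
}
#endif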
11776
11777
11778/**
11779 * Calculates the effective address of a ModR/M memory operand.
11780 *
11781 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11782 *
11783 * @return Strict VBox status code.
11784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11785 * @param bRm The ModRM byte.
11786 * @param cbImm The size of any immediate following the
11787 * effective address opcode bytes. Important for
11788 * RIP relative addressing.
11789 * @param pGCPtrEff Where to return the effective address.
11790 * @param offRsp RSP displacement.
11791 */
11792IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11793{
11794 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11795 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11796# define SET_SS_DEF() \
11797 do \
11798 { \
11799 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11800 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11801 } while (0)
11802
11803 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11804 {
11805/** @todo Check the effective address size crap! */
11806 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11807 {
11808 uint16_t u16EffAddr;
11809
11810 /* Handle the disp16 form with no registers first. */
11811 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11812 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11813 else
11814 {
11815 /* Get the displacement. */
11816 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11817 {
11818 case 0: u16EffAddr = 0; break;
11819 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11820 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11821 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11822 }
11823
11824 /* Add the base and index registers to the disp. */
11825 switch (bRm & X86_MODRM_RM_MASK)
11826 {
11827 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11828 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11829 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11830 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11831 case 4: u16EffAddr += pCtx->si; break;
11832 case 5: u16EffAddr += pCtx->di; break;
11833 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11834 case 7: u16EffAddr += pCtx->bx; break;
11835 }
11836 }
11837
11838 *pGCPtrEff = u16EffAddr;
11839 }
11840 else
11841 {
11842 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11843 uint32_t u32EffAddr;
11844
11845 /* Handle the disp32 form with no registers first. */
11846 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11847 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11848 else
11849 {
11850 /* Get the register (or SIB) value. */
11851 switch ((bRm & X86_MODRM_RM_MASK))
11852 {
11853 case 0: u32EffAddr = pCtx->eax; break;
11854 case 1: u32EffAddr = pCtx->ecx; break;
11855 case 2: u32EffAddr = pCtx->edx; break;
11856 case 3: u32EffAddr = pCtx->ebx; break;
11857 case 4: /* SIB */
11858 {
11859 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11860
11861 /* Get the index and scale it. */
11862 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11863 {
11864 case 0: u32EffAddr = pCtx->eax; break;
11865 case 1: u32EffAddr = pCtx->ecx; break;
11866 case 2: u32EffAddr = pCtx->edx; break;
11867 case 3: u32EffAddr = pCtx->ebx; break;
11868 case 4: u32EffAddr = 0; /*none */ break;
11869 case 5: u32EffAddr = pCtx->ebp; break;
11870 case 6: u32EffAddr = pCtx->esi; break;
11871 case 7: u32EffAddr = pCtx->edi; break;
11872 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11873 }
11874 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11875
11876 /* add base */
11877 switch (bSib & X86_SIB_BASE_MASK)
11878 {
11879 case 0: u32EffAddr += pCtx->eax; break;
11880 case 1: u32EffAddr += pCtx->ecx; break;
11881 case 2: u32EffAddr += pCtx->edx; break;
11882 case 3: u32EffAddr += pCtx->ebx; break;
11883 case 4:
11884 u32EffAddr += pCtx->esp + offRsp;
11885 SET_SS_DEF();
11886 break;
11887 case 5:
11888 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11889 {
11890 u32EffAddr += pCtx->ebp;
11891 SET_SS_DEF();
11892 }
11893 else
11894 {
11895 uint32_t u32Disp;
11896 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11897 u32EffAddr += u32Disp;
11898 }
11899 break;
11900 case 6: u32EffAddr += pCtx->esi; break;
11901 case 7: u32EffAddr += pCtx->edi; break;
11902 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11903 }
11904 break;
11905 }
11906 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11907 case 6: u32EffAddr = pCtx->esi; break;
11908 case 7: u32EffAddr = pCtx->edi; break;
11909 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11910 }
11911
11912 /* Get and add the displacement. */
11913 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11914 {
11915 case 0:
11916 break;
11917 case 1:
11918 {
11919 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11920 u32EffAddr += i8Disp;
11921 break;
11922 }
11923 case 2:
11924 {
11925 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11926 u32EffAddr += u32Disp;
11927 break;
11928 }
11929 default:
11930 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11931 }
11932
11933 }
11934 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11935 *pGCPtrEff = u32EffAddr;
11936 else
11937 {
11938 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11939 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11940 }
11941 }
11942 }
11943 else
11944 {
11945 uint64_t u64EffAddr;
11946
11947 /* Handle the rip+disp32 form with no registers first. */
11948 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11949 {
11950 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11951 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11952 }
11953 else
11954 {
11955 /* Get the register (or SIB) value. */
11956 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11957 {
11958 case 0: u64EffAddr = pCtx->rax; break;
11959 case 1: u64EffAddr = pCtx->rcx; break;
11960 case 2: u64EffAddr = pCtx->rdx; break;
11961 case 3: u64EffAddr = pCtx->rbx; break;
11962 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11963 case 6: u64EffAddr = pCtx->rsi; break;
11964 case 7: u64EffAddr = pCtx->rdi; break;
11965 case 8: u64EffAddr = pCtx->r8; break;
11966 case 9: u64EffAddr = pCtx->r9; break;
11967 case 10: u64EffAddr = pCtx->r10; break;
11968 case 11: u64EffAddr = pCtx->r11; break;
11969 case 13: u64EffAddr = pCtx->r13; break;
11970 case 14: u64EffAddr = pCtx->r14; break;
11971 case 15: u64EffAddr = pCtx->r15; break;
11972 /* SIB */
11973 case 4:
11974 case 12:
11975 {
11976 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11977
11978 /* Get the index and scale it. */
11979 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11980 {
11981 case 0: u64EffAddr = pCtx->rax; break;
11982 case 1: u64EffAddr = pCtx->rcx; break;
11983 case 2: u64EffAddr = pCtx->rdx; break;
11984 case 3: u64EffAddr = pCtx->rbx; break;
11985 case 4: u64EffAddr = 0; /*none */ break;
11986 case 5: u64EffAddr = pCtx->rbp; break;
11987 case 6: u64EffAddr = pCtx->rsi; break;
11988 case 7: u64EffAddr = pCtx->rdi; break;
11989 case 8: u64EffAddr = pCtx->r8; break;
11990 case 9: u64EffAddr = pCtx->r9; break;
11991 case 10: u64EffAddr = pCtx->r10; break;
11992 case 11: u64EffAddr = pCtx->r11; break;
11993 case 12: u64EffAddr = pCtx->r12; break;
11994 case 13: u64EffAddr = pCtx->r13; break;
11995 case 14: u64EffAddr = pCtx->r14; break;
11996 case 15: u64EffAddr = pCtx->r15; break;
11997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11998 }
11999 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12000
12001 /* add base */
12002 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12003 {
12004 case 0: u64EffAddr += pCtx->rax; break;
12005 case 1: u64EffAddr += pCtx->rcx; break;
12006 case 2: u64EffAddr += pCtx->rdx; break;
12007 case 3: u64EffAddr += pCtx->rbx; break;
12008 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12009 case 6: u64EffAddr += pCtx->rsi; break;
12010 case 7: u64EffAddr += pCtx->rdi; break;
12011 case 8: u64EffAddr += pCtx->r8; break;
12012 case 9: u64EffAddr += pCtx->r9; break;
12013 case 10: u64EffAddr += pCtx->r10; break;
12014 case 11: u64EffAddr += pCtx->r11; break;
12015 case 12: u64EffAddr += pCtx->r12; break;
12016 case 14: u64EffAddr += pCtx->r14; break;
12017 case 15: u64EffAddr += pCtx->r15; break;
12018 /* complicated encodings */
12019 case 5:
12020 case 13:
12021 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12022 {
12023 if (!pVCpu->iem.s.uRexB)
12024 {
12025 u64EffAddr += pCtx->rbp;
12026 SET_SS_DEF();
12027 }
12028 else
12029 u64EffAddr += pCtx->r13;
12030 }
12031 else
12032 {
12033 uint32_t u32Disp;
12034 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12035 u64EffAddr += (int32_t)u32Disp;
12036 }
12037 break;
12038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12039 }
12040 break;
12041 }
12042 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12043 }
12044
12045 /* Get and add the displacement. */
12046 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12047 {
12048 case 0:
12049 break;
12050 case 1:
12051 {
12052 int8_t i8Disp;
12053 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12054 u64EffAddr += i8Disp;
12055 break;
12056 }
12057 case 2:
12058 {
12059 uint32_t u32Disp;
12060 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12061 u64EffAddr += (int32_t)u32Disp;
12062 break;
12063 }
12064 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12065 }
12066
12067 }
12068
12069 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12070 *pGCPtrEff = u64EffAddr;
12071 else
12072 {
12073 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12074 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12075 }
12076 }
12077
12078 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12079 return VINF_SUCCESS;
12080}
12081
12082
12083#ifdef IEM_WITH_SETJMP
12084/**
12085 * Calculates the effective address of a ModR/M memory operand.
12086 *
12087 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12088 *
12089 * May longjmp on internal error.
12090 *
12091 * @return The effective address.
12092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12093 * @param bRm The ModRM byte.
12094 * @param cbImm The size of any immediate following the
12095 * effective address opcode bytes. Important for
12096 * RIP relative addressing.
12097 */
12098IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12099{
12100 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12101 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12102# define SET_SS_DEF() \
12103 do \
12104 { \
12105 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12106 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12107 } while (0)
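    /* SET_SS_DEF is used by the xBP/xSP based addressing forms below, which
       default to the SS segment unless a segment override prefix is in effect. */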
12108
12109 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12110 {
12111/** @todo Check the effective address size crap! */
12112 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12113 {
12114 uint16_t u16EffAddr;
12115
12116 /* Handle the disp16 form with no registers first. */
12117 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12118 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12119 else
12120 {
12121 /* Get the displacement. */
12122 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12123 {
12124 case 0: u16EffAddr = 0; break;
12125 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12126 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12127 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12128 }
12129
12130 /* Add the base and index registers to the disp. */
12131 switch (bRm & X86_MODRM_RM_MASK)
12132 {
12133 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12134 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12135 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12136 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12137 case 4: u16EffAddr += pCtx->si; break;
12138 case 5: u16EffAddr += pCtx->di; break;
12139 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12140 case 7: u16EffAddr += pCtx->bx; break;
12141 }
12142 }
12143
12144 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12145 return u16EffAddr;
12146 }
12147
12148 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12149 uint32_t u32EffAddr;
12150
12151 /* Handle the disp32 form with no registers first. */
12152 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12153 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12154 else
12155 {
12156 /* Get the register (or SIB) value. */
12157 switch ((bRm & X86_MODRM_RM_MASK))
12158 {
12159 case 0: u32EffAddr = pCtx->eax; break;
12160 case 1: u32EffAddr = pCtx->ecx; break;
12161 case 2: u32EffAddr = pCtx->edx; break;
12162 case 3: u32EffAddr = pCtx->ebx; break;
12163 case 4: /* SIB */
12164 {
12165 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12166
12167 /* Get the index and scale it. */
12168 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12169 {
12170 case 0: u32EffAddr = pCtx->eax; break;
12171 case 1: u32EffAddr = pCtx->ecx; break;
12172 case 2: u32EffAddr = pCtx->edx; break;
12173 case 3: u32EffAddr = pCtx->ebx; break;
12174 case 4: u32EffAddr = 0; /*none */ break;
12175 case 5: u32EffAddr = pCtx->ebp; break;
12176 case 6: u32EffAddr = pCtx->esi; break;
12177 case 7: u32EffAddr = pCtx->edi; break;
12178 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12179 }
12180 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12181
12182 /* add base */
12183 switch (bSib & X86_SIB_BASE_MASK)
12184 {
12185 case 0: u32EffAddr += pCtx->eax; break;
12186 case 1: u32EffAddr += pCtx->ecx; break;
12187 case 2: u32EffAddr += pCtx->edx; break;
12188 case 3: u32EffAddr += pCtx->ebx; break;
12189 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12190 case 5:
12191 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12192 {
12193 u32EffAddr += pCtx->ebp;
12194 SET_SS_DEF();
12195 }
12196 else
12197 {
12198 uint32_t u32Disp;
12199 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12200 u32EffAddr += u32Disp;
12201 }
12202 break;
12203 case 6: u32EffAddr += pCtx->esi; break;
12204 case 7: u32EffAddr += pCtx->edi; break;
12205 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12206 }
12207 break;
12208 }
12209 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12210 case 6: u32EffAddr = pCtx->esi; break;
12211 case 7: u32EffAddr = pCtx->edi; break;
12212 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12213 }
12214
12215 /* Get and add the displacement. */
12216 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12217 {
12218 case 0:
12219 break;
12220 case 1:
12221 {
12222 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12223 u32EffAddr += i8Disp;
12224 break;
12225 }
12226 case 2:
12227 {
12228 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12229 u32EffAddr += u32Disp;
12230 break;
12231 }
12232 default:
12233 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12234 }
12235 }
12236
12237 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12238 {
12239 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12240 return u32EffAddr;
12241 }
12242 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12243 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12244 return u32EffAddr & UINT16_MAX;
12245 }
12246
12247 uint64_t u64EffAddr;
12248
12249 /* Handle the rip+disp32 form with no registers first. */
12250 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12251 {
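            /* RIP-relative: the disp32 is relative to the address of the next
               instruction, i.e. RIP advanced past the bytes decoded so far plus
               any trailing immediate (cbImm). */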
12252 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12253 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12254 }
12255 else
12256 {
12257 /* Get the register (or SIB) value. */
12258 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12259 {
12260 case 0: u64EffAddr = pCtx->rax; break;
12261 case 1: u64EffAddr = pCtx->rcx; break;
12262 case 2: u64EffAddr = pCtx->rdx; break;
12263 case 3: u64EffAddr = pCtx->rbx; break;
12264 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12265 case 6: u64EffAddr = pCtx->rsi; break;
12266 case 7: u64EffAddr = pCtx->rdi; break;
12267 case 8: u64EffAddr = pCtx->r8; break;
12268 case 9: u64EffAddr = pCtx->r9; break;
12269 case 10: u64EffAddr = pCtx->r10; break;
12270 case 11: u64EffAddr = pCtx->r11; break;
12271 case 13: u64EffAddr = pCtx->r13; break;
12272 case 14: u64EffAddr = pCtx->r14; break;
12273 case 15: u64EffAddr = pCtx->r15; break;
12274 /* SIB */
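                /* R/M of 4 selects SIB addressing regardless of REX.B, hence both 4 and 12 land here. */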
12275 case 4:
12276 case 12:
12277 {
12278 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12279
12280 /* Get the index and scale it. */
12281 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12282 {
12283 case 0: u64EffAddr = pCtx->rax; break;
12284 case 1: u64EffAddr = pCtx->rcx; break;
12285 case 2: u64EffAddr = pCtx->rdx; break;
12286 case 3: u64EffAddr = pCtx->rbx; break;
12287 case 4: u64EffAddr = 0; /*none */ break;
12288 case 5: u64EffAddr = pCtx->rbp; break;
12289 case 6: u64EffAddr = pCtx->rsi; break;
12290 case 7: u64EffAddr = pCtx->rdi; break;
12291 case 8: u64EffAddr = pCtx->r8; break;
12292 case 9: u64EffAddr = pCtx->r9; break;
12293 case 10: u64EffAddr = pCtx->r10; break;
12294 case 11: u64EffAddr = pCtx->r11; break;
12295 case 12: u64EffAddr = pCtx->r12; break;
12296 case 13: u64EffAddr = pCtx->r13; break;
12297 case 14: u64EffAddr = pCtx->r14; break;
12298 case 15: u64EffAddr = pCtx->r15; break;
12299 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12300 }
12301 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12302
12303 /* add base */
12304 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12305 {
12306 case 0: u64EffAddr += pCtx->rax; break;
12307 case 1: u64EffAddr += pCtx->rcx; break;
12308 case 2: u64EffAddr += pCtx->rdx; break;
12309 case 3: u64EffAddr += pCtx->rbx; break;
12310 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12311 case 6: u64EffAddr += pCtx->rsi; break;
12312 case 7: u64EffAddr += pCtx->rdi; break;
12313 case 8: u64EffAddr += pCtx->r8; break;
12314 case 9: u64EffAddr += pCtx->r9; break;
12315 case 10: u64EffAddr += pCtx->r10; break;
12316 case 11: u64EffAddr += pCtx->r11; break;
12317 case 12: u64EffAddr += pCtx->r12; break;
12318 case 14: u64EffAddr += pCtx->r14; break;
12319 case 15: u64EffAddr += pCtx->r15; break;
12320 /* complicated encodings */
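                        /* Base encoding 5/13 is special: with mod=00 there is no base
                           register and a disp32 follows instead; with mod!=00 the base
                           is rBP (or r13 when REX.B is set). */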
12321 case 5:
12322 case 13:
12323 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12324 {
12325 if (!pVCpu->iem.s.uRexB)
12326 {
12327 u64EffAddr += pCtx->rbp;
12328 SET_SS_DEF();
12329 }
12330 else
12331 u64EffAddr += pCtx->r13;
12332 }
12333 else
12334 {
12335 uint32_t u32Disp;
12336 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12337 u64EffAddr += (int32_t)u32Disp;
12338 }
12339 break;
12340 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12341 }
12342 break;
12343 }
12344 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12345 }
12346
12347 /* Get and add the displacement. */
12348 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12349 {
12350 case 0:
12351 break;
12352 case 1:
12353 {
12354 int8_t i8Disp;
12355 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12356 u64EffAddr += i8Disp;
12357 break;
12358 }
12359 case 2:
12360 {
12361 uint32_t u32Disp;
12362 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12363 u64EffAddr += (int32_t)u32Disp;
12364 break;
12365 }
12366 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12367 }
12368
12369 }
12370
12371 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12372 {
12373 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12374 return u64EffAddr;
12375 }
12376 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12377 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12378 return u64EffAddr & UINT32_MAX;
12379}
12380#endif /* IEM_WITH_SETJMP */
12381
12382
12383/** @} */
12384
12385
12386
12387/*
12388 * Include the instructions
12389 */
12390#include "IEMAllInstructions.cpp.h"
12391
12392
12393
12394
12395#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12396
12397/**
12398 * Sets up execution verification mode.
12399 */
12400IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12401{
12402
12403 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12404
12405 /*
12406 * Always note down the address of the current instruction.
12407 */
12408 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12409 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12410
12411 /*
12412 * Enable verification and/or logging.
12413 */
12414 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12415 if ( fNewNoRem
12416 && ( 0
12417#if 0 /* auto enable on first paged protected mode interrupt */
12418 || ( pOrgCtx->eflags.Bits.u1IF
12419 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12420 && TRPMHasTrap(pVCpu)
12421 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12422#endif
12423#if 0
12424 || ( pOrgCtx->cs.Sel == 0x10
12425 && ( pOrgCtx->rip == 0x90119e3e
12426 || pOrgCtx->rip == 0x901d9810))
12427#endif
12428#if 0 /* Auto enable DSL - FPU stuff. */
12429 || ( pOrgCtx->cs.Sel == 0x10
12430 && (// pOrgCtx->rip == 0xc02ec07f
12431 //|| pOrgCtx->rip == 0xc02ec082
12432 //|| pOrgCtx->rip == 0xc02ec0c9
12433 0
12434 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12435#endif
12436#if 0 /* Auto enable DSL - fstp st0 stuff. */
12437 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12438#endif
12439#if 0
12440 || pOrgCtx->rip == 0x9022bb3a
12441#endif
12442#if 0
12443 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12444#endif
12445#if 0
12446 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12447 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12448#endif
12449#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12450 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12451 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12452 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12453#endif
12454#if 0 /* NT4SP1 - xadd early boot. */
12455 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12456#endif
12457#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12458 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12459#endif
12460#if 0 /* NT4SP1 - cmpxchg (AMD). */
12461 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12462#endif
12463#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12464 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12465#endif
12466#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12467 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12468
12469#endif
12470#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12471 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12472
12473#endif
12474#if 0 /* NT4SP1 - frstor [ecx] */
12475 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12476#endif
12477#if 0 /* xxxxxx - All long mode code. */
12478 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12479#endif
12480#if 0 /* rep movsq linux 3.7 64-bit boot. */
12481 || (pOrgCtx->rip == 0x0000000000100241)
12482#endif
12483#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12484 || (pOrgCtx->rip == 0x000000000215e240)
12485#endif
12486#if 0 /* DOS's size-overridden iret to v8086. */
12487 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12488#endif
12489 )
12490 )
12491 {
12492 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12493 RTLogFlags(NULL, "enabled");
12494 fNewNoRem = false;
12495 }
12496 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12497 {
12498 pVCpu->iem.s.fNoRem = fNewNoRem;
12499 if (!fNewNoRem)
12500 {
12501 LogAlways(("Enabling verification mode!\n"));
12502 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12503 }
12504 else
12505 LogAlways(("Disabling verification mode!\n"));
12506 }
12507
12508 /*
12509 * Switch state.
12510 */
12511 if (IEM_VERIFICATION_ENABLED(pVCpu))
12512 {
12513 static CPUMCTX s_DebugCtx; /* Ugly! */
12514
12515 s_DebugCtx = *pOrgCtx;
12516 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12517 }
12518
12519 /*
12520 * See if there is an interrupt pending in TRPM and inject it if we can.
12521 */
12522 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12523 if ( pOrgCtx->eflags.Bits.u1IF
12524 && TRPMHasTrap(pVCpu)
12525 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12526 {
12527 uint8_t u8TrapNo;
12528 TRPMEVENT enmType;
12529 RTGCUINT uErrCode;
12530 RTGCPTR uCr2;
12531 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12532 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12533 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12534 TRPMResetTrap(pVCpu);
12535 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12536 }
12537
12538 /*
12539 * Reset the counters.
12540 */
12541 pVCpu->iem.s.cIOReads = 0;
12542 pVCpu->iem.s.cIOWrites = 0;
12543 pVCpu->iem.s.fIgnoreRaxRdx = false;
12544 pVCpu->iem.s.fOverlappingMovs = false;
12545 pVCpu->iem.s.fProblematicMemory = false;
12546 pVCpu->iem.s.fUndefinedEFlags = 0;
12547
12548 if (IEM_VERIFICATION_ENABLED(pVCpu))
12549 {
12550 /*
12551 * Free all verification records.
12552 */
12553 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12554 pVCpu->iem.s.pIemEvtRecHead = NULL;
12555 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12556 do
12557 {
12558 while (pEvtRec)
12559 {
12560 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12561 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12562 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12563 pEvtRec = pNext;
12564 }
12565 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12566 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12567 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12568 } while (pEvtRec);
12569 }
12570}
12571
12572
12573/**
12574 * Allocate an event record.
12575 * @returns Pointer to a record.
12576 */
12577IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12578{
12579 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12580 return NULL;
12581
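    /* Recycle a record from the free list when possible; otherwise allocate a
       fresh one from the ring-3 MM heap. */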
12582 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12583 if (pEvtRec)
12584 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12585 else
12586 {
12587 if (!pVCpu->iem.s.ppIemEvtRecNext)
12588 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12589
12590 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12591 if (!pEvtRec)
12592 return NULL;
12593 }
12594 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12595 pEvtRec->pNext = NULL;
12596 return pEvtRec;
12597}
12598
12599
12600/**
12601 * IOMMMIORead notification.
12602 */
12603VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12604{
12605 PVMCPU pVCpu = VMMGetCpu(pVM);
12606 if (!pVCpu)
12607 return;
12608 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12609 if (!pEvtRec)
12610 return;
12611 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12612 pEvtRec->u.RamRead.GCPhys = GCPhys;
12613 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12614 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12615 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12616}
12617
12618
12619/**
12620 * IOMMMIOWrite notification.
12621 */
12622VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12623{
12624 PVMCPU pVCpu = VMMGetCpu(pVM);
12625 if (!pVCpu)
12626 return;
12627 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12628 if (!pEvtRec)
12629 return;
12630 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12631 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12632 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12633 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12634 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12635 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12636 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12637 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12638 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12639}
12640
12641
12642/**
12643 * IOMIOPortRead notification.
12644 */
12645VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12646{
12647 PVMCPU pVCpu = VMMGetCpu(pVM);
12648 if (!pVCpu)
12649 return;
12650 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12651 if (!pEvtRec)
12652 return;
12653 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12654 pEvtRec->u.IOPortRead.Port = Port;
12655 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12656 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12657 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12658}
12659
12660/**
12661 * IOMIOPortWrite notification.
12662 */
12663VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12664{
12665 PVMCPU pVCpu = VMMGetCpu(pVM);
12666 if (!pVCpu)
12667 return;
12668 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12669 if (!pEvtRec)
12670 return;
12671 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12672 pEvtRec->u.IOPortWrite.Port = Port;
12673 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12674 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12675 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12676 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12677}
12678
12679
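/**
 * IOMIOPortReadString notification.
 */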
12680VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12681{
12682 PVMCPU pVCpu = VMMGetCpu(pVM);
12683 if (!pVCpu)
12684 return;
12685 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12686 if (!pEvtRec)
12687 return;
12688 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12689 pEvtRec->u.IOPortStrRead.Port = Port;
12690 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12691 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12692 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12693 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12694}
12695
12696
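/**
 * IOMIOPortWriteString notification.
 */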
12697VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12698{
12699 PVMCPU pVCpu = VMMGetCpu(pVM);
12700 if (!pVCpu)
12701 return;
12702 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12703 if (!pEvtRec)
12704 return;
12705 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12706 pEvtRec->u.IOPortStrWrite.Port = Port;
12707 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12708 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12709 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12710 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12711}
12712
12713
12714/**
12715 * Fakes and records an I/O port read.
12716 *
12717 * @returns VINF_SUCCESS.
12718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12719 * @param Port The I/O port.
12720 * @param pu32Value Where to store the fake value.
12721 * @param cbValue The size of the access.
12722 */
12723IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12724{
12725 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12726 if (pEvtRec)
12727 {
12728 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12729 pEvtRec->u.IOPortRead.Port = Port;
12730 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12731 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12732 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12733 }
12734 pVCpu->iem.s.cIOReads++;
12735 *pu32Value = 0xcccccccc;
12736 return VINF_SUCCESS;
12737}
12738
12739
12740/**
12741 * Fakes and records an I/O port write.
12742 *
12743 * @returns VINF_SUCCESS.
12744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12745 * @param Port The I/O port.
12746 * @param u32Value The value being written.
12747 * @param cbValue The size of the access.
12748 */
12749IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12750{
12751 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12752 if (pEvtRec)
12753 {
12754 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12755 pEvtRec->u.IOPortWrite.Port = Port;
12756 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12757 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12758 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12759 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12760 }
12761 pVCpu->iem.s.cIOWrites++;
12762 return VINF_SUCCESS;
12763}
12764
12765
12766/**
12767 * Used to add extra details about a stub case.
12768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12769 */
12770IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12771{
12772 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12773 PVM pVM = pVCpu->CTX_SUFF(pVM);
12774
12775 char szRegs[4096];
12776 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12777 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12778 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12779 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12780 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12781 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12782 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12783 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12784 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12785 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12786 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12787 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12788 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12789 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12790 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12791 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12792 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12793 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12794 " efer=%016VR{efer}\n"
12795 " pat=%016VR{pat}\n"
12796 " sf_mask=%016VR{sf_mask}\n"
12797 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12798 " lstar=%016VR{lstar}\n"
12799 " star=%016VR{star} cstar=%016VR{cstar}\n"
12800 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12801 );
12802
12803 char szInstr1[256];
12804 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12805 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12806 szInstr1, sizeof(szInstr1), NULL);
12807 char szInstr2[256];
12808 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12809 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12810 szInstr2, sizeof(szInstr2), NULL);
12811
12812 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12813}
12814
12815
12816/**
12817 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12818 * dump to the assertion info.
12819 *
12820 * @param pEvtRec The record to dump.
12821 */
12822IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12823{
12824 switch (pEvtRec->enmEvent)
12825 {
12826 case IEMVERIFYEVENT_IOPORT_READ:
12827 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12828 pEvtRec->u.IOPortRead.Port,
12829 pEvtRec->u.IOPortRead.cbValue);
12830 break;
12831 case IEMVERIFYEVENT_IOPORT_WRITE:
12832 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12833 pEvtRec->u.IOPortWrite.Port,
12834 pEvtRec->u.IOPortWrite.cbValue,
12835 pEvtRec->u.IOPortWrite.u32Value);
12836 break;
12837 case IEMVERIFYEVENT_IOPORT_STR_READ:
12838 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12839 pEvtRec->u.IOPortStrRead.Port,
12840 pEvtRec->u.IOPortStrRead.cbValue,
12841 pEvtRec->u.IOPortStrRead.cTransfers);
12842 break;
12843 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12844 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12845 pEvtRec->u.IOPortStrWrite.Port,
12846 pEvtRec->u.IOPortStrWrite.cbValue,
12847 pEvtRec->u.IOPortStrWrite.cTransfers);
12848 break;
12849 case IEMVERIFYEVENT_RAM_READ:
12850 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12851 pEvtRec->u.RamRead.GCPhys,
12852 pEvtRec->u.RamRead.cb);
12853 break;
12854 case IEMVERIFYEVENT_RAM_WRITE:
12855 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12856 pEvtRec->u.RamWrite.GCPhys,
12857 pEvtRec->u.RamWrite.cb,
12858 (int)pEvtRec->u.RamWrite.cb,
12859 pEvtRec->u.RamWrite.ab);
12860 break;
12861 default:
12862 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12863 break;
12864 }
12865}
12866
12867
12868/**
12869 * Raises an assertion on the specified records, showing the given message with
12870 * dumps of both records attached.
12871 *
12872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12873 * @param pEvtRec1 The first record.
12874 * @param pEvtRec2 The second record.
12875 * @param pszMsg The message explaining why we're asserting.
12876 */
12877IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
12878{
12879 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12880 iemVerifyAssertAddRecordDump(pEvtRec1);
12881 iemVerifyAssertAddRecordDump(pEvtRec2);
12882 iemVerifyAssertMsg2(pVCpu);
12883 RTAssertPanic();
12884}
12885
12886
12887/**
12888 * Raises an assertion on the specified record, showing the given message with
12889 * a record dump attached.
12890 *
12891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12892 * @param pEvtRec The record to dump.
12893 * @param pszMsg The message explaining why we're asserting.
12894 */
12895IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
12896{
12897 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12898 iemVerifyAssertAddRecordDump(pEvtRec);
12899 iemVerifyAssertMsg2(pVCpu);
12900 RTAssertPanic();
12901}
12902
12903
12904/**
12905 * Verifies a write record.
12906 *
12907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12908 * @param pEvtRec The write record.
12909 * @param fRem Set if the other execution was done by REM; clear
12910 * if it was HM.
12911 */
12912IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
12913{
12914 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
12915 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
12916 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
12917 if ( RT_FAILURE(rc)
12918 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
12919 {
12920 /* fend off ins */
12921 if ( !pVCpu->iem.s.cIOReads
12922 || pEvtRec->u.RamWrite.ab[0] != 0xcc
12923 || ( pEvtRec->u.RamWrite.cb != 1
12924 && pEvtRec->u.RamWrite.cb != 2
12925 && pEvtRec->u.RamWrite.cb != 4) )
12926 {
12927 /* fend off ROMs and MMIO */
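                /* (These ranges appear to cover the legacy VGA/BIOS window at
                   0xa0000-0xfffff and the firmware mapping just below 4GB.) */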
12928 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
12929 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
12930 {
12931 /* fend off fxsave */
12932 if (pEvtRec->u.RamWrite.cb != 512)
12933 {
12934 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
12935 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12936 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
12937 RTAssertMsg2Add("%s: %.*Rhxs\n"
12938 "iem: %.*Rhxs\n",
12939 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
12940 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
12941 iemVerifyAssertAddRecordDump(pEvtRec);
12942 iemVerifyAssertMsg2(pVCpu);
12943 RTAssertPanic();
12944 }
12945 }
12946 }
12947 }
12948
12949}
12950
12951/**
12952 * Performs the post-execution verification checks.
12953 */
12954IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
12955{
12956 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12957 return rcStrictIem;
12958
12959 /*
12960 * Switch back the state.
12961 */
12962 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
12963 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
12964 Assert(pOrgCtx != pDebugCtx);
12965 IEM_GET_CTX(pVCpu) = pOrgCtx;
12966
12967 /*
12968 * Execute the instruction in HM (when possible) or REM.
12969 */
12970 bool fRem = false;
12971 PVM pVM = pVCpu->CTX_SUFF(pVM);
12972
12973 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
12974#ifdef IEM_VERIFICATION_MODE_FULL_HM
12975 if ( HMIsEnabled(pVM)
12976 && pVCpu->iem.s.cIOReads == 0
12977 && pVCpu->iem.s.cIOWrites == 0
12978 && !pVCpu->iem.s.fProblematicMemory)
12979 {
12980 uint64_t uStartRip = pOrgCtx->rip;
12981 unsigned iLoops = 0;
12982 do
12983 {
12984 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
12985 iLoops++;
12986 } while ( rc == VINF_SUCCESS
12987 || ( rc == VINF_EM_DBG_STEPPED
12988 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
12989 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
12990 || ( pOrgCtx->rip != pDebugCtx->rip
12991 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
12992 && iLoops < 8) );
12993 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
12994 rc = VINF_SUCCESS;
12995 }
12996#endif
12997 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
12998 || rc == VINF_IOM_R3_IOPORT_READ
12999 || rc == VINF_IOM_R3_IOPORT_WRITE
13000 || rc == VINF_IOM_R3_MMIO_READ
13001 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13002 || rc == VINF_IOM_R3_MMIO_WRITE
13003 || rc == VINF_CPUM_R3_MSR_READ
13004 || rc == VINF_CPUM_R3_MSR_WRITE
13005 || rc == VINF_EM_RESCHEDULE
13006 )
13007 {
13008 EMRemLock(pVM);
13009 rc = REMR3EmulateInstruction(pVM, pVCpu);
13010 AssertRC(rc);
13011 EMRemUnlock(pVM);
13012 fRem = true;
13013 }
13014
13015# if 1 /* Skip unimplemented instructions for now. */
13016 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13017 {
13018 IEM_GET_CTX(pVCpu) = pOrgCtx;
13019 if (rc == VINF_EM_DBG_STEPPED)
13020 return VINF_SUCCESS;
13021 return rc;
13022 }
13023# endif
13024
13025 /*
13026 * Compare the register states.
13027 */
13028 unsigned cDiffs = 0;
13029 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13030 {
13031 //Log(("REM and IEM ends up with different registers!\n"));
13032 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13033
13034# define CHECK_FIELD(a_Field) \
13035 do \
13036 { \
13037 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13038 { \
13039 switch (sizeof(pOrgCtx->a_Field)) \
13040 { \
13041 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13042 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13043 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13044 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13045 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13046 } \
13047 cDiffs++; \
13048 } \
13049 } while (0)
13050# define CHECK_XSTATE_FIELD(a_Field) \
13051 do \
13052 { \
13053 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13054 { \
13055 switch (sizeof(pOrgXState->a_Field)) \
13056 { \
13057 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13058 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13059 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13060 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13061 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13062 } \
13063 cDiffs++; \
13064 } \
13065 } while (0)
13066
13067# define CHECK_BIT_FIELD(a_Field) \
13068 do \
13069 { \
13070 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13071 { \
13072 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13073 cDiffs++; \
13074 } \
13075 } while (0)
13076
13077# define CHECK_SEL(a_Sel) \
13078 do \
13079 { \
13080 CHECK_FIELD(a_Sel.Sel); \
13081 CHECK_FIELD(a_Sel.Attr.u); \
13082 CHECK_FIELD(a_Sel.u64Base); \
13083 CHECK_FIELD(a_Sel.u32Limit); \
13084 CHECK_FIELD(a_Sel.fFlags); \
13085 } while (0)
13086
13087 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13088 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13089
13090#if 1 /* The recompiler doesn't update these the intel way. */
13091 if (fRem)
13092 {
13093 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13094 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13095 pOrgXState->x87.CS = pDebugXState->x87.CS;
13096 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13097 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13098 pOrgXState->x87.DS = pDebugXState->x87.DS;
13099 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13100 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13101 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13102 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13103 }
13104#endif
13105 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13106 {
13107 RTAssertMsg2Weak(" the FPU state differs\n");
13108 cDiffs++;
13109 CHECK_XSTATE_FIELD(x87.FCW);
13110 CHECK_XSTATE_FIELD(x87.FSW);
13111 CHECK_XSTATE_FIELD(x87.FTW);
13112 CHECK_XSTATE_FIELD(x87.FOP);
13113 CHECK_XSTATE_FIELD(x87.FPUIP);
13114 CHECK_XSTATE_FIELD(x87.CS);
13115 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13116 CHECK_XSTATE_FIELD(x87.FPUDP);
13117 CHECK_XSTATE_FIELD(x87.DS);
13118 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13119 CHECK_XSTATE_FIELD(x87.MXCSR);
13120 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13121 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13122 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13123 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13124 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13125 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13126 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13127 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13128 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13129 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13130 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13131 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13132 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13133 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13134 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13135 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13136 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13137 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13138 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13139 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13140 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13141 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13142 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13143 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13144 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13145 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13146 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13147 }
13148 CHECK_FIELD(rip);
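        /* Mask out EFLAGS bits the executed instruction leaves undefined so
           they do not produce false mismatches. */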
13149 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13150 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13151 {
13152 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13153 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13154 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13155 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13156 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13157 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13158 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13159 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13160 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13161 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13162 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13163 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13164 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13165 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13166 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13167 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13168 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13169 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13170 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13171 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13172 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13173 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13174 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13175 }
13176
13177 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13178 CHECK_FIELD(rax);
13179 CHECK_FIELD(rcx);
13180 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13181 CHECK_FIELD(rdx);
13182 CHECK_FIELD(rbx);
13183 CHECK_FIELD(rsp);
13184 CHECK_FIELD(rbp);
13185 CHECK_FIELD(rsi);
13186 CHECK_FIELD(rdi);
13187 CHECK_FIELD(r8);
13188 CHECK_FIELD(r9);
13189 CHECK_FIELD(r10);
13190 CHECK_FIELD(r11);
13191 CHECK_FIELD(r12);
13192 CHECK_FIELD(r13);
13193 CHECK_SEL(cs);
13194 CHECK_SEL(ss);
13195 CHECK_SEL(ds);
13196 CHECK_SEL(es);
13197 CHECK_SEL(fs);
13198 CHECK_SEL(gs);
13199 CHECK_FIELD(cr0);
13200
13201 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13202 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13203 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13204 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13205 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13206 {
13207 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13208 { /* ignore */ }
13209 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13210 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13211 && fRem)
13212 { /* ignore */ }
13213 else
13214 CHECK_FIELD(cr2);
13215 }
13216 CHECK_FIELD(cr3);
13217 CHECK_FIELD(cr4);
13218 CHECK_FIELD(dr[0]);
13219 CHECK_FIELD(dr[1]);
13220 CHECK_FIELD(dr[2]);
13221 CHECK_FIELD(dr[3]);
13222 CHECK_FIELD(dr[6]);
13223 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13224 CHECK_FIELD(dr[7]);
13225 CHECK_FIELD(gdtr.cbGdt);
13226 CHECK_FIELD(gdtr.pGdt);
13227 CHECK_FIELD(idtr.cbIdt);
13228 CHECK_FIELD(idtr.pIdt);
13229 CHECK_SEL(ldtr);
13230 CHECK_SEL(tr);
13231 CHECK_FIELD(SysEnter.cs);
13232 CHECK_FIELD(SysEnter.eip);
13233 CHECK_FIELD(SysEnter.esp);
13234 CHECK_FIELD(msrEFER);
13235 CHECK_FIELD(msrSTAR);
13236 CHECK_FIELD(msrPAT);
13237 CHECK_FIELD(msrLSTAR);
13238 CHECK_FIELD(msrCSTAR);
13239 CHECK_FIELD(msrSFMASK);
13240 CHECK_FIELD(msrKERNELGSBASE);
13241
13242 if (cDiffs != 0)
13243 {
13244 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13245 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13246 RTAssertPanic();
13247 static bool volatile s_fEnterDebugger = true;
13248 if (s_fEnterDebugger)
13249 DBGFSTOP(pVM);
13250
13251# if 1 /* Ignore unimplemented instructions for now. */
13252 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13253 rcStrictIem = VINF_SUCCESS;
13254# endif
13255 }
13256# undef CHECK_FIELD
13257# undef CHECK_BIT_FIELD
13258 }
13259
13260 /*
13261 * If the register state compared fine, check the verification event
13262 * records.
13263 */
13264 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13265 {
13266 /*
13267 * Compare verification event records.
13268 * - I/O port accesses should be a 1:1 match.
13269 */
13270 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13271 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13272 while (pIemRec && pOtherRec)
13273 {
13274 /* Since we might miss RAM writes and reads, ignore reads and verify
13275 any extra IEM write records directly against guest memory. */
13276 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13277 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13278 && pIemRec->pNext)
13279 {
13280 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13281 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13282 pIemRec = pIemRec->pNext;
13283 }
13284
13285 /* Do the compare. */
13286 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13287 {
13288 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13289 break;
13290 }
13291 bool fEquals;
13292 switch (pIemRec->enmEvent)
13293 {
13294 case IEMVERIFYEVENT_IOPORT_READ:
13295 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13296 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13297 break;
13298 case IEMVERIFYEVENT_IOPORT_WRITE:
13299 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13300 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13301 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13302 break;
13303 case IEMVERIFYEVENT_IOPORT_STR_READ:
13304 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13305 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13306 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13307 break;
13308 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13309 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13310 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13311 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13312 break;
13313 case IEMVERIFYEVENT_RAM_READ:
13314 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13315 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13316 break;
13317 case IEMVERIFYEVENT_RAM_WRITE:
13318 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13319 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13320 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13321 break;
13322 default:
13323 fEquals = false;
13324 break;
13325 }
13326 if (!fEquals)
13327 {
13328 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13329 break;
13330 }
13331
13332 /* advance */
13333 pIemRec = pIemRec->pNext;
13334 pOtherRec = pOtherRec->pNext;
13335 }
13336
13337 /* Ignore extra writes and reads. */
13338 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13339 {
13340 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13341 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13342 pIemRec = pIemRec->pNext;
13343 }
13344 if (pIemRec != NULL)
13345 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13346 else if (pOtherRec != NULL)
13347 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13348 }
13349 IEM_GET_CTX(pVCpu) = pOrgCtx;
13350
13351 return rcStrictIem;
13352}
13353
13354#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13355
13356/* stubs */
13357IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13358{
13359 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13360 return VERR_INTERNAL_ERROR;
13361}
13362
13363IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13364{
13365 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13366 return VERR_INTERNAL_ERROR;
13367}
13368
13369#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13370
13371
13372#ifdef LOG_ENABLED
13373/**
13374 * Logs the current instruction.
13375 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13376 * @param pCtx The current CPU context.
13377 * @param fSameCtx Set if we have the same context information as the VMM,
13378 * clear if we may have already executed an instruction in
13379 * our debug context. When clear, we assume IEMCPU holds
13380 * valid CPU mode info.
13381 */
13382IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13383{
13384# ifdef IN_RING3
13385 if (LogIs2Enabled())
13386 {
13387 char szInstr[256];
13388 uint32_t cbInstr = 0;
13389 if (fSameCtx)
13390 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13391 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13392 szInstr, sizeof(szInstr), &cbInstr);
13393 else
13394 {
13395 uint32_t fFlags = 0;
13396 switch (pVCpu->iem.s.enmCpuMode)
13397 {
13398 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13399 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13400 case IEMMODE_16BIT:
13401 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13402 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13403 else
13404 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13405 break;
13406 }
13407 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13408 szInstr, sizeof(szInstr), &cbInstr);
13409 }
13410
13411 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13412 Log2(("****\n"
13413 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13414 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13415 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13416 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13417 " %s\n"
13418 ,
13419 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13420 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13421 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13422 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13423 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13424 szInstr));
13425
13426 if (LogIs3Enabled())
13427 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13428 }
13429 else
13430# endif
13431 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13432 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13433}
13434#endif
13435
13436
13437/**
13438 * Makes status code adjustments (pass up from I/O and access handlers)
13439 * as well as maintaining statistics.
13440 *
13441 * @returns Strict VBox status code to pass up.
13442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13443 * @param rcStrict The status from executing an instruction.
13444 */
13445DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13446{
13447 if (rcStrict != VINF_SUCCESS)
13448 {
13449 if (RT_SUCCESS(rcStrict))
13450 {
13451 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13452 || rcStrict == VINF_IOM_R3_IOPORT_READ
13453 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13454 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13455 || rcStrict == VINF_IOM_R3_MMIO_READ
13456 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13457 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13458 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13459 || rcStrict == VINF_CPUM_R3_MSR_READ
13460 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13461 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13462 || rcStrict == VINF_EM_RAW_TO_R3
13463 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13464 /* raw-mode / virt handlers only: */
13465 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13466 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13467 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13468 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13469 || rcStrict == VINF_SELM_SYNC_GDT
13470 || rcStrict == VINF_CSAM_PENDING_ACTION
13471 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13472 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13473/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
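        /* A pending pass-up status replaces rcStrict when it lies outside the
           VINF_EM range or is numerically lower (which appears to mean higher
           priority); otherwise rcStrict stands and the case is only counted. */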
13474 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13475 if (rcPassUp == VINF_SUCCESS)
13476 pVCpu->iem.s.cRetInfStatuses++;
13477 else if ( rcPassUp < VINF_EM_FIRST
13478 || rcPassUp > VINF_EM_LAST
13479 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13480 {
13481 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13482 pVCpu->iem.s.cRetPassUpStatus++;
13483 rcStrict = rcPassUp;
13484 }
13485 else
13486 {
13487 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13488 pVCpu->iem.s.cRetInfStatuses++;
13489 }
13490 }
13491 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13492 pVCpu->iem.s.cRetAspectNotImplemented++;
13493 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13494 pVCpu->iem.s.cRetInstrNotImplemented++;
13495#ifdef IEM_VERIFICATION_MODE_FULL
13496 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13497 rcStrict = VINF_SUCCESS;
13498#endif
13499 else
13500 pVCpu->iem.s.cRetErrStatuses++;
13501 }
13502 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13503 {
13504 pVCpu->iem.s.cRetPassUpStatus++;
13505 rcStrict = pVCpu->iem.s.rcPassUp;
13506 }
13507
13508 return rcStrict;
13509}
13510
13511
13512/**
13513 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13514 * IEMExecOneWithPrefetchedByPC.
13515 *
13516 * Similar code is found in IEMExecLots.
13517 *
13518 * @return Strict VBox status code.
13519 * @param pVCpu The cross context virtual CPU structure of the
13520 * calling thread.
13521 * @param fExecuteInhibit If set, execute the instruction following CLI,
13522 * POP SS and MOV SS,GR.
13523 */
13524#ifdef __GNUC__
13525DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13526#else
13527DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13528#endif
13529{
13530#ifdef IEM_WITH_SETJMP
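    /* In setjmp mode the opcode fetchers and memory helpers longjmp back here
       on failure instead of returning a status code; the previous jump buffer
       is saved and restored so nested invocations keep working. */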
13531 VBOXSTRICTRC rcStrict;
13532 jmp_buf JmpBuf;
13533 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13534 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13535 if ((rcStrict = setjmp(JmpBuf)) == 0)
13536 {
13537 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13538 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13539 }
13540 else
13541 pVCpu->iem.s.cLongJumps++;
13542 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13543#else
13544 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13545 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13546#endif
13547 if (rcStrict == VINF_SUCCESS)
13548 pVCpu->iem.s.cInstructions++;
13549 if (pVCpu->iem.s.cActiveMappings > 0)
13550 {
13551 Assert(rcStrict != VINF_SUCCESS);
13552 iemMemRollback(pVCpu);
13553 }
13554//#ifdef DEBUG
13555// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13556//#endif
13557
13558 /* Execute the next instruction as well if a cli, pop ss or
13559 mov ss, Gr has just completed successfully. */
13560 if ( fExecuteInhibit
13561 && rcStrict == VINF_SUCCESS
13562 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13563 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13564 {
13565 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13566 if (rcStrict == VINF_SUCCESS)
13567 {
13568#ifdef LOG_ENABLED
13569 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13570#endif
13571#ifdef IEM_WITH_SETJMP
13572 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13573 if ((rcStrict = setjmp(JmpBuf)) == 0)
13574 {
13575 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13576 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13577 }
13578 else
13579 pVCpu->iem.s.cLongJumps++;
13580 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13581#else
13582 IEM_OPCODE_GET_NEXT_U8(&b);
13583 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13584#endif
13585 if (rcStrict == VINF_SUCCESS)
13586 pVCpu->iem.s.cInstructions++;
13587 if (pVCpu->iem.s.cActiveMappings > 0)
13588 {
13589 Assert(rcStrict != VINF_SUCCESS);
13590 iemMemRollback(pVCpu);
13591 }
13592 }
13593 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13594 }
13595
13596 /*
13597 * Return value fiddling, statistics and sanity assertions.
13598 */
13599 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13600
13601 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13602 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13603#if defined(IEM_VERIFICATION_MODE_FULL)
13604 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13605 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13606 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13607 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13608#endif
13609 return rcStrict;
13610}
13611
13612
13613#ifdef IN_RC
13614/**
13615 * Re-enters raw-mode or ensure we return to ring-3.
13616 *
13617 * @returns rcStrict, maybe modified.
13618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13619 * @param pCtx The current CPU context.
13620 * @param rcStrict The status code returned by the interpreter.
13621 */
13622DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13623{
13624 if ( !pVCpu->iem.s.fInPatchCode
13625 && ( rcStrict == VINF_SUCCESS
13626 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13627 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13628 {
13629 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13630 CPUMRawEnter(pVCpu);
13631 else
13632 {
13633 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13634 rcStrict = VINF_EM_RESCHEDULE;
13635 }
13636 }
13637 return rcStrict;
13638}
13639#endif
13640
13641
13642/**
13643 * Execute one instruction.
13644 *
13645 * @return Strict VBox status code.
13646 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13647 */
13648VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13649{
13650#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13651 if (++pVCpu->iem.s.cVerifyDepth == 1)
13652 iemExecVerificationModeSetup(pVCpu);
13653#endif
13654#ifdef LOG_ENABLED
13655 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13656 iemLogCurInstr(pVCpu, pCtx, true);
13657#endif
13658
13659 /*
13660 * Do the decoding and emulation.
13661 */
13662 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13663 if (rcStrict == VINF_SUCCESS)
13664 rcStrict = iemExecOneInner(pVCpu, true);
13665
13666#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13667 /*
13668 * Assert some sanity.
13669 */
13670 if (pVCpu->iem.s.cVerifyDepth == 1)
13671 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13672 pVCpu->iem.s.cVerifyDepth--;
13673#endif
13674#ifdef IN_RC
13675 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13676#endif
13677 if (rcStrict != VINF_SUCCESS)
13678 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13679 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13680 return rcStrict;
13681}
13682
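/*
 * Usage sketch (hypothetical caller, not quoted from any VBox component): an
 * EM-style loop would typically hand a single guest instruction to IEM and
 * simply propagate any scheduling status to its own caller.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // e.g. VINF_EM_RESCHEDULE or an I/O-to-ring-3 status
 */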
13683
13684VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13685{
13686 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13687 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13688
13689 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13690 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13691 if (rcStrict == VINF_SUCCESS)
13692 {
13693 rcStrict = iemExecOneInner(pVCpu, true);
13694 if (pcbWritten)
13695 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13696 }
13697
13698#ifdef IN_RC
13699 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13700#endif
13701 return rcStrict;
13702}
13703
13704
13705VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13706 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13707{
13708 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13709 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13710
13711 VBOXSTRICTRC rcStrict;
13712 if ( cbOpcodeBytes
13713 && pCtx->rip == OpcodeBytesPC)
13714 {
13715 iemInitDecoder(pVCpu, false);
13716#ifdef IEM_WITH_CODE_TLB
13717 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13718 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13719 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13720 pVCpu->iem.s.offCurInstrStart = 0;
13721 pVCpu->iem.s.offInstrNextByte = 0;
13722#else
13723 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13724 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13725#endif
13726 rcStrict = VINF_SUCCESS;
13727 }
13728 else
13729 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13730 if (rcStrict == VINF_SUCCESS)
13731 {
13732 rcStrict = iemExecOneInner(pVCpu, true);
13733 }
13734
13735#ifdef IN_RC
13736 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13737#endif
13738 return rcStrict;
13739}
13740
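/*
 * Usage sketch (abInstr and cbInstr are assumed caller locals): when the
 * caller already holds the opcode bytes for the current RIP - for instance
 * from an exit information buffer - it can hand them to IEM so decoding needs
 * no guest memory read; if the RIP does not match, the call falls back to a
 * normal prefetch.
 *
 *      rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
 *                                              &abInstr[0], cbInstr);
 */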
13741
13742VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13743{
13744 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13745 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13746
13747 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13748 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13749 if (rcStrict == VINF_SUCCESS)
13750 {
13751 rcStrict = iemExecOneInner(pVCpu, false);
13752 if (pcbWritten)
13753 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13754 }
13755
13756#ifdef IN_RC
13757 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13758#endif
13759 return rcStrict;
13760}
13761
13762
13763VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13764 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13765{
13766 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13767 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13768
13769 VBOXSTRICTRC rcStrict;
13770 if ( cbOpcodeBytes
13771 && pCtx->rip == OpcodeBytesPC)
13772 {
13773 iemInitDecoder(pVCpu, true);
13774#ifdef IEM_WITH_CODE_TLB
13775 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13776 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13777 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13778 pVCpu->iem.s.offCurInstrStart = 0;
13779 pVCpu->iem.s.offInstrNextByte = 0;
13780#else
13781 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13782 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13783#endif
13784 rcStrict = VINF_SUCCESS;
13785 }
13786 else
13787 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13788 if (rcStrict == VINF_SUCCESS)
13789 rcStrict = iemExecOneInner(pVCpu, false);
13790
13791#ifdef IN_RC
13792 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13793#endif
13794 return rcStrict;
13795}
13796
13797
13798/**
13799 * For debugging DISGetParamSize; may come in handy.
13800 *
13801 * @returns Strict VBox status code.
13802 * @param pVCpu The cross context virtual CPU structure of the
13803 * calling EMT.
13804 * @param pCtxCore The context core structure.
13805 * @param OpcodeBytesPC The PC of the opcode bytes.
13806 * @param pvOpcodeBytes Prefetched opcode bytes.
13807 * @param cbOpcodeBytes Number of prefetched bytes.
13808 * @param pcbWritten Where to return the number of bytes written.
13809 * Optional.
13810 */
13811VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13812 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13813 uint32_t *pcbWritten)
13814{
13815 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13816 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13817
13818 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13819 VBOXSTRICTRC rcStrict;
13820 if ( cbOpcodeBytes
13821 && pCtx->rip == OpcodeBytesPC)
13822 {
13823 iemInitDecoder(pVCpu, true);
13824#ifdef IEM_WITH_CODE_TLB
13825 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13826 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13827 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13828 pVCpu->iem.s.offCurInstrStart = 0;
13829 pVCpu->iem.s.offInstrNextByte = 0;
13830#else
13831 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13832 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13833#endif
13834 rcStrict = VINF_SUCCESS;
13835 }
13836 else
13837 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13838 if (rcStrict == VINF_SUCCESS)
13839 {
13840 rcStrict = iemExecOneInner(pVCpu, false);
13841 if (pcbWritten)
13842 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13843 }
13844
13845#ifdef IN_RC
13846 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13847#endif
13848 return rcStrict;
13849}
13850
13851
13852VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13853{
13854 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13855
13856#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13857 /*
13858 * See if there is an interrupt pending in TRPM, inject it if we can.
13859 */
13860 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13861# ifdef IEM_VERIFICATION_MODE_FULL
13862 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13863# endif
13864 if ( pCtx->eflags.Bits.u1IF
13865 && TRPMHasTrap(pVCpu)
13866 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13867 {
13868 uint8_t u8TrapNo;
13869 TRPMEVENT enmType;
13870 RTGCUINT uErrCode;
13871 RTGCPTR uCr2;
13872 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13873 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13874 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13875 TRPMResetTrap(pVCpu);
13876 }
13877
13878 /*
13879 * Log the state.
13880 */
13881# ifdef LOG_ENABLED
13882 iemLogCurInstr(pVCpu, pCtx, true);
13883# endif
13884
13885 /*
13886 * Do the decoding and emulation.
13887 */
13888 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13889 if (rcStrict == VINF_SUCCESS)
13890 rcStrict = iemExecOneInner(pVCpu, true);
13891
13892 /*
13893 * Assert some sanity.
13894 */
13895 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13896
13897 /*
13898 * Log and return.
13899 */
13900 if (rcStrict != VINF_SUCCESS)
13901 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13902 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13903 if (pcInstructions)
13904 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
13905 return rcStrict;
13906
13907#else /* Not verification mode */
13908
13909 /*
13910 * See if there is an interrupt pending in TRPM, inject it if we can.
13911 */
13912 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13913# ifdef IEM_VERIFICATION_MODE_FULL
13914 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13915# endif
13916 if ( pCtx->eflags.Bits.u1IF
13917 && TRPMHasTrap(pVCpu)
13918 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13919 {
13920 uint8_t u8TrapNo;
13921 TRPMEVENT enmType;
13922 RTGCUINT uErrCode;
13923 RTGCPTR uCr2;
13924 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13925 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13926 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13927 TRPMResetTrap(pVCpu);
13928 }
13929
13930 /*
13931 * Initial decoder init w/ prefetch, then setup setjmp.
13932 */
13933 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13934 if (rcStrict == VINF_SUCCESS)
13935 {
13936# ifdef IEM_WITH_SETJMP
13937 jmp_buf JmpBuf;
13938 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13939 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13940 pVCpu->iem.s.cActiveMappings = 0;
13941 if ((rcStrict = setjmp(JmpBuf)) == 0)
13942# endif
13943 {
13944 /*
13945 * The run loop. We limit ourselves to 4096 instructions right now.
13946 */
13947 PVM pVM = pVCpu->CTX_SUFF(pVM);
13948 uint32_t cInstr = 4096;
13949 for (;;)
13950 {
13951 /*
13952 * Log the state.
13953 */
13954# ifdef LOG_ENABLED
13955 iemLogCurInstr(pVCpu, pCtx, true);
13956# endif
13957
13958 /*
13959 * Do the decoding and emulation.
13960 */
13961 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13962 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13963 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13964 {
13965 Assert(pVCpu->iem.s.cActiveMappings == 0);
13966 pVCpu->iem.s.cInstructions++;
13967 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
13968 {
13969 uint32_t fCpu = pVCpu->fLocalForcedActions
13970 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
13971 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
13972 | VMCPU_FF_TLB_FLUSH
13973# ifdef VBOX_WITH_RAW_MODE
13974 | VMCPU_FF_TRPM_SYNC_IDT
13975 | VMCPU_FF_SELM_SYNC_TSS
13976 | VMCPU_FF_SELM_SYNC_GDT
13977 | VMCPU_FF_SELM_SYNC_LDT
13978# endif
13979 | VMCPU_FF_INHIBIT_INTERRUPTS
13980 | VMCPU_FF_BLOCK_NMIS ));
13981
13982 if (RT_LIKELY( ( !fCpu
13983 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
13984 && !pCtx->rflags.Bits.u1IF) )
13985 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
13986 {
13987 if (cInstr-- > 0)
13988 {
13989 Assert(pVCpu->iem.s.cActiveMappings == 0);
13990 iemReInitDecoder(pVCpu);
13991 continue;
13992 }
13993 }
13994 }
13995 Assert(pVCpu->iem.s.cActiveMappings == 0);
13996 }
13997 else if (pVCpu->iem.s.cActiveMappings > 0)
13998 iemMemRollback(pVCpu);
13999 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14000 break;
14001 }
14002 }
14003# ifdef IEM_WITH_SETJMP
14004 else
14005 {
14006 if (pVCpu->iem.s.cActiveMappings > 0)
14007 iemMemRollback(pVCpu);
14008 pVCpu->iem.s.cLongJumps++;
14009 }
14010 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14011# endif
14012
14013 /*
14014 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14015 */
14016 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14018# if defined(IEM_VERIFICATION_MODE_FULL)
14019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14023# endif
14024 }
14025
14026 /*
14027 * Maybe re-enter raw-mode and log.
14028 */
14029# ifdef IN_RC
14030 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14031# endif
14032 if (rcStrict != VINF_SUCCESS)
14033 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14034 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14035 if (pcInstructions)
14036 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14037 return rcStrict;
14038#endif /* Not verification mode */
14039}
14040
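/*
 * Usage sketch (hypothetical ring-3 caller): let IEM run a batch of
 * instructions and use the optional counter for statistics; the returned
 * status decides whether to keep iterating or to leave IEM.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
 *      Log(("Executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */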
14041
14042
14043/**
14044 * Injects a trap, fault, abort, software interrupt or external interrupt.
14045 *
14046 * The parameter list matches TRPMQueryTrapAll pretty closely.
14047 *
14048 * @returns Strict VBox status code.
14049 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14050 * @param u8TrapNo The trap number.
14051 * @param enmType What type is it (trap/fault/abort), software
14052 * interrupt or hardware interrupt.
14053 * @param uErrCode The error code if applicable.
14054 * @param uCr2 The CR2 value if applicable.
14055 * @param cbInstr The instruction length (only relevant for
14056 * software interrupts).
14057 */
14058VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14059 uint8_t cbInstr)
14060{
14061 iemInitDecoder(pVCpu, false);
14062#ifdef DBGFTRACE_ENABLED
14063 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14064 u8TrapNo, enmType, uErrCode, uCr2);
14065#endif
14066
14067 uint32_t fFlags;
14068 switch (enmType)
14069 {
14070 case TRPM_HARDWARE_INT:
14071 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14072 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14073 uErrCode = uCr2 = 0;
14074 break;
14075
14076 case TRPM_SOFTWARE_INT:
14077 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14078 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14079 uErrCode = uCr2 = 0;
14080 break;
14081
14082 case TRPM_TRAP:
14083 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14084 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14085 if (u8TrapNo == X86_XCPT_PF)
14086 fFlags |= IEM_XCPT_FLAGS_CR2;
14087 switch (u8TrapNo)
14088 {
14089 case X86_XCPT_DF:
14090 case X86_XCPT_TS:
14091 case X86_XCPT_NP:
14092 case X86_XCPT_SS:
14093 case X86_XCPT_PF:
14094 case X86_XCPT_AC:
14095 fFlags |= IEM_XCPT_FLAGS_ERR;
14096 break;
14097
14098 case X86_XCPT_NMI:
14099 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14100 break;
14101 }
14102 break;
14103
14104 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14105 }
14106
14107 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14108}
14109
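/*
 * Usage sketch: forwarding a pending TRPM event to the guest, mirroring the
 * injection done in IEMExecLots above (rcStrict handling left to the caller).
 *
 *      uint8_t   u8TrapNo;
 *      TRPMEVENT enmType;
 *      RTGCUINT  uErrCode;
 *      RTGCPTR   uCr2;
 *      if (RT_SUCCESS(TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL)))
 *          rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0); // cbInstr=0
 */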
14110
14111/**
14112 * Injects the active TRPM event.
14113 *
14114 * @returns Strict VBox status code.
14115 * @param pVCpu The cross context virtual CPU structure.
14116 */
14117VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14118{
14119#ifndef IEM_IMPLEMENTS_TASKSWITCH
14120 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14121#else
14122 uint8_t u8TrapNo;
14123 TRPMEVENT enmType;
14124 RTGCUINT uErrCode;
14125 RTGCUINTPTR uCr2;
14126 uint8_t cbInstr;
14127 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14128 if (RT_FAILURE(rc))
14129 return rc;
14130
14131 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14132
14133 /** @todo Are there any other codes that imply the event was successfully
14134 * delivered to the guest? See @bugref{6607}. */
14135 if ( rcStrict == VINF_SUCCESS
14136 || rcStrict == VINF_IEM_RAISED_XCPT)
14137 {
14138 TRPMResetTrap(pVCpu);
14139 }
14140 return rcStrict;
14141#endif
14142}
14143
14144
14145VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14146{
14147 return VERR_NOT_IMPLEMENTED;
14148}
14149
14150
14151VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14152{
14153 return VERR_NOT_IMPLEMENTED;
14154}
14155
14156
14157#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14158/**
14159 * Executes an IRET instruction with default operand size.
14160 *
14161 * This is for PATM.
14162 *
14163 * @returns VBox status code.
14164 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14165 * @param pCtxCore The register frame.
14166 */
14167VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14168{
14169 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14170
14171 iemCtxCoreToCtx(pCtx, pCtxCore);
14172 iemInitDecoder(pVCpu);
14173 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14174 if (rcStrict == VINF_SUCCESS)
14175 iemCtxToCtxCore(pCtxCore, pCtx);
14176 else
14177 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14178 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14179 return rcStrict;
14180}
14181#endif
14182
14183
14184/**
14185 * Macro used by the IEMExec* method to check the given instruction length.
14186 *
14187 * Will return on failure!
14188 *
14189 * @param a_cbInstr The given instruction length.
14190 * @param a_cbMin The minimum length.
14191 */
14192#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14193 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14194 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14195
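/*
 * The single unsigned comparison in the macro above checks both bounds at
 * once, i.e. a_cbMin <= a_cbInstr <= 15.  Worked example with a_cbMin = 2
 * (right hand side = 15 - 2 = 13):
 *      a_cbInstr = 1  ->  1 - 2 wraps to 0xffffffff  >  13  ->  VERR_IEM_INVALID_INSTR_LENGTH
 *      a_cbInstr = 2  ->  0                          <= 13  ->  ok
 *      a_cbInstr = 15 ->  13                         <= 13  ->  ok
 *      a_cbInstr = 16 ->  14                         >  13  ->  VERR_IEM_INVALID_INSTR_LENGTH
 */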
14196
14197/**
14198 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14199 *
14200 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14201 *
14202 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
14203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14204 * @param rcStrict The status code to fiddle.
14205 */
14206DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14207{
14208 iemUninitExec(pVCpu);
14209#ifdef IN_RC
14210 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14211 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14212#else
14213 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14214#endif
14215}
14216
14217
14218/**
14219 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14220 *
14221 * This API ASSUMES that the caller has already verified that the guest code is
14222 * allowed to access the I/O port. (The I/O port is in the DX register in the
14223 * guest state.)
14224 *
14225 * @returns Strict VBox status code.
14226 * @param pVCpu The cross context virtual CPU structure.
14227 * @param cbValue The size of the I/O port access (1, 2, or 4).
14228 * @param enmAddrMode The addressing mode.
14229 * @param fRepPrefix Indicates whether a repeat prefix is used
14230 * (doesn't matter which for this instruction).
14231 * @param cbInstr The instruction length in bytes.
14232 * @param iEffSeg The effective segment register (X86_SREG_XXX).
14233 * @param fIoChecked Whether the access to the I/O port has been
14234 * checked or not. It's typically checked in the
14235 * HM scenario.
14236 */
14237VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14238 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14239{
14240 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14241 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14242
14243 /*
14244 * State init.
14245 */
14246 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14247
14248 /*
14249 * Switch orgy for getting to the right handler.
14250 */
14251 VBOXSTRICTRC rcStrict;
14252 if (fRepPrefix)
14253 {
14254 switch (enmAddrMode)
14255 {
14256 case IEMMODE_16BIT:
14257 switch (cbValue)
14258 {
14259 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14260 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14261 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14262 default:
14263 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14264 }
14265 break;
14266
14267 case IEMMODE_32BIT:
14268 switch (cbValue)
14269 {
14270 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14271 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14272 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14273 default:
14274 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14275 }
14276 break;
14277
14278 case IEMMODE_64BIT:
14279 switch (cbValue)
14280 {
14281 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14282 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14283 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14284 default:
14285 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14286 }
14287 break;
14288
14289 default:
14290 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14291 }
14292 }
14293 else
14294 {
14295 switch (enmAddrMode)
14296 {
14297 case IEMMODE_16BIT:
14298 switch (cbValue)
14299 {
14300 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14301 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14302 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14303 default:
14304 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14305 }
14306 break;
14307
14308 case IEMMODE_32BIT:
14309 switch (cbValue)
14310 {
14311 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14312 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14313 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14314 default:
14315 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14316 }
14317 break;
14318
14319 case IEMMODE_64BIT:
14320 switch (cbValue)
14321 {
14322 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14323 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14324 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14325 default:
14326 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14327 }
14328 break;
14329
14330 default:
14331 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14332 }
14333 }
14334
14335 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14336}
14337
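/*
 * Usage sketch (argument values are made up): replaying a REP OUTSB with
 * 32-bit addressing and DS as the effective segment after the caller has
 * already validated the port access, which is what fIoChecked promises.
 *
 *      rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true,  // byte access, rep prefix
 *                                      cbInstr, X86_SREG_DS, true);    // DS, port already checked
 */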
14338
14339/**
14340 * Interface for HM and EM for executing string I/O IN (read) instructions.
14341 *
14342 * This API ASSUMES that the caller has already verified that the guest code is
14343 * allowed to access the I/O port. (The I/O port is in the DX register in the
14344 * guest state.)
14345 *
14346 * @returns Strict VBox status code.
14347 * @param pVCpu The cross context virtual CPU structure.
14348 * @param cbValue The size of the I/O port access (1, 2, or 4).
14349 * @param enmAddrMode The addressing mode.
14350 * @param fRepPrefix Indicates whether a repeat prefix is used
14351 * (doesn't matter which for this instruction).
14352 * @param cbInstr The instruction length in bytes.
14353 * @param fIoChecked Whether the access to the I/O port has been
14354 * checked or not. It's typically checked in the
14355 * HM scenario.
14356 */
14357VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14358 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14359{
14360 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14361
14362 /*
14363 * State init.
14364 */
14365 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14366
14367 /*
14368 * Switch orgy for getting to the right handler.
14369 */
14370 VBOXSTRICTRC rcStrict;
14371 if (fRepPrefix)
14372 {
14373 switch (enmAddrMode)
14374 {
14375 case IEMMODE_16BIT:
14376 switch (cbValue)
14377 {
14378 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14379 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14380 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14381 default:
14382 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14383 }
14384 break;
14385
14386 case IEMMODE_32BIT:
14387 switch (cbValue)
14388 {
14389 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14390 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14391 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14392 default:
14393 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14394 }
14395 break;
14396
14397 case IEMMODE_64BIT:
14398 switch (cbValue)
14399 {
14400 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14401 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14402 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14403 default:
14404 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14405 }
14406 break;
14407
14408 default:
14409 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14410 }
14411 }
14412 else
14413 {
14414 switch (enmAddrMode)
14415 {
14416 case IEMMODE_16BIT:
14417 switch (cbValue)
14418 {
14419 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14420 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14421 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14422 default:
14423 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14424 }
14425 break;
14426
14427 case IEMMODE_32BIT:
14428 switch (cbValue)
14429 {
14430 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14431 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14432 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14433 default:
14434 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14435 }
14436 break;
14437
14438 case IEMMODE_64BIT:
14439 switch (cbValue)
14440 {
14441 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14442 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14443 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14444 default:
14445 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14446 }
14447 break;
14448
14449 default:
14450 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14451 }
14452 }
14453
14454 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14455}
14456
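/*
 * Usage sketch (values are made up): the INS counterpart takes no effective
 * segment parameter since string input always writes to ES:[(E/R)DI]; here a
 * single non-rep 16-bit-addressing INSW with the port access already checked.
 *
 *      rcStrict = IEMExecStringIoRead(pVCpu, 2, IEMMODE_16BIT, false, cbInstr, true);
 */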
14457
14458/**
14459 * Interface for rawmode to execute an OUT instruction.
14460 *
14461 * @returns Strict VBox status code.
14462 * @param pVCpu The cross context virtual CPU structure.
14463 * @param cbInstr The instruction length in bytes.
14464 * @param u16Port The port to write to.
14465 * @param cbReg The register size.
14466 *
14467 * @remarks In ring-0 not all of the state needs to be synced in.
14468 */
14469VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14470{
14471 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14472 Assert(cbReg <= 4 && cbReg != 3);
14473
14474 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14475 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14476 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14477}
14478
14479
14480/**
14481 * Interface for rawmode to execute an IN instruction.
14482 *
14483 * @returns Strict VBox status code.
14484 * @param pVCpu The cross context virtual CPU structure.
14485 * @param cbInstr The instruction length in bytes.
14486 * @param u16Port The port to read.
14487 * @param cbReg The register size.
14488 */
14489VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14490{
14491 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14492 Assert(cbReg <= 4 && cbReg != 3);
14493
14494 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14495 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14496 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14497}
14498
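/*
 * Usage sketch (u16Port is an assumed caller variable): replaying an already
 * decoded one-byte IN AL,DX / OUT DX,AL pair, byte sized in both directions.
 *
 *      rcStrict = IEMExecDecodedIn(pVCpu, 1, u16Port, 1);    // cbInstr=1, cbReg=1
 *      rcStrict = IEMExecDecodedOut(pVCpu, 1, u16Port, 1);
 */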
14499
14500/**
14501 * Interface for HM and EM to write to a CRx register.
14502 *
14503 * @returns Strict VBox status code.
14504 * @param pVCpu The cross context virtual CPU structure.
14505 * @param cbInstr The instruction length in bytes.
14506 * @param iCrReg The control register number (destination).
14507 * @param iGReg The general purpose register number (source).
14508 *
14509 * @remarks In ring-0 not all of the state needs to be synced in.
14510 */
14511VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14512{
14513 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14514 Assert(iCrReg < 16);
14515 Assert(iGReg < 16);
14516
14517 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14518 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14519 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14520}
14521
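/*
 * Usage sketch (register numbers are made up): letting IEM carry out a
 * trapped MOV CR3,RAX that was decoded elsewhere; the 0F 22 /r encoding is
 * three bytes, which satisfies the length check above.
 *
 *      rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, 0);  // cbInstr=3, iCrReg=3 (CR3), iGReg=0 (rAX)
 */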
14522
14523/**
14524 * Interface for HM and EM to read from a CRx register.
14525 *
14526 * @returns Strict VBox status code.
14527 * @param pVCpu The cross context virtual CPU structure.
14528 * @param cbInstr The instruction length in bytes.
14529 * @param iGReg The general purpose register number (destination).
14530 * @param iCrReg The control register number (source).
14531 *
14532 * @remarks In ring-0 not all of the state needs to be synced in.
14533 */
14534VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14535{
14536 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14537 Assert(iCrReg < 16);
14538 Assert(iGReg < 16);
14539
14540 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14541 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14542 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14543}
14544
14545
14546/**
14547 * Interface for HM and EM to clear the CR0[TS] bit.
14548 *
14549 * @returns Strict VBox status code.
14550 * @param pVCpu The cross context virtual CPU structure.
14551 * @param cbInstr The instruction length in bytes.
14552 *
14553 * @remarks In ring-0 not all of the state needs to be synced in.
14554 */
14555VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14556{
14557 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14558
14559 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14560 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14561 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14562}
14563
14564
14565/**
14566 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14567 *
14568 * @returns Strict VBox status code.
14569 * @param pVCpu The cross context virtual CPU structure.
14570 * @param cbInstr The instruction length in bytes.
14571 * @param uValue The value to load into CR0.
14572 *
14573 * @remarks In ring-0 not all of the state needs to be synced in.
14574 */
14575VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14576{
14577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14578
14579 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14580 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14581 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14582}
14583
14584
14585/**
14586 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14587 *
14588 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14589 *
14590 * @returns Strict VBox status code.
14591 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14592 * @param cbInstr The instruction length in bytes.
14593 * @remarks In ring-0 not all of the state needs to be synced in.
14594 * @thread EMT(pVCpu)
14595 */
14596VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14597{
14598 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14599
14600 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14601 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14602 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14603}
14604
14605#ifdef IN_RING3
14606
14607/**
14608 * Handles the unlikely and probably fatal merge cases.
14609 *
14610 * @returns Merged status code.
14611 * @param rcStrict Current EM status code.
14612 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14613 * with @a rcStrict.
14614 * @param iMemMap The memory mapping index. For error reporting only.
14615 * @param pVCpu The cross context virtual CPU structure of the calling
14616 * thread, for error reporting only.
14617 */
14618DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14619 unsigned iMemMap, PVMCPU pVCpu)
14620{
14621 if (RT_FAILURE_NP(rcStrict))
14622 return rcStrict;
14623
14624 if (RT_FAILURE_NP(rcStrictCommit))
14625 return rcStrictCommit;
14626
14627 if (rcStrict == rcStrictCommit)
14628 return rcStrictCommit;
14629
14630 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14631 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14632 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14633 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14634 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14635 return VERR_IOM_FF_STATUS_IPE;
14636}
14637
14638
14639/**
14640 * Helper for IEMR3ProcessForceFlag.
14641 *
14642 * @returns Merged status code.
14643 * @param rcStrict Current EM status code.
14644 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14645 * with @a rcStrict.
14646 * @param iMemMap The memory mapping index. For error reporting only.
14647 * @param pVCpu The cross context virtual CPU structure of the calling
14648 * thread, for error reporting only.
14649 */
14650DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14651{
14652 /* Simple. */
14653 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14654 return rcStrictCommit;
14655
14656 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14657 return rcStrict;
14658
14659 /* EM scheduling status codes. */
14660 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14661 && rcStrict <= VINF_EM_LAST))
14662 {
14663 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14664 && rcStrictCommit <= VINF_EM_LAST))
14665 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14666 }
14667
14668 /* Unlikely */
14669 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14670}
14671
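/*
 * In short: a VINF_SUCCESS or VINF_EM_RAW_TO_R3 on the EM side gives way to
 * the commit status, a VINF_SUCCESS commit leaves the EM status untouched,
 * two EM scheduling codes resolve to the numerically smaller one (lower
 * VINF_EM_* values carry higher priority), and anything else is punted to
 * iemR3MergeStatusSlow above, which passes failures through and flags
 * genuinely conflicting codes.
 */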
14672
14673/**
14674 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14675 *
14676 * @returns Merge between @a rcStrict and what the commit operation returned.
14677 * @param pVM The cross context VM structure.
14678 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14679 * @param rcStrict The status code returned by ring-0 or raw-mode.
14680 */
14681VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14682{
14683 /*
14684 * Reset the pending commit.
14685 */
14686 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14687 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14688 ("%#x %#x %#x\n",
14689 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14690 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14691
14692 /*
14693 * Commit the pending bounce buffers (usually just one).
14694 */
14695 unsigned cBufs = 0;
14696 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14697 while (iMemMap-- > 0)
14698 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14699 {
14700 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14701 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14702 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14703
14704 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14705 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14706 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14707
14708 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14709 {
14710 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14711 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14712 pbBuf,
14713 cbFirst,
14714 PGMACCESSORIGIN_IEM);
14715 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14716 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14717 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14718 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14719 }
14720
14721 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14722 {
14723 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14724 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14725 pbBuf + cbFirst,
14726 cbSecond,
14727 PGMACCESSORIGIN_IEM);
14728 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14729 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14730 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14731 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14732 }
14733 cBufs++;
14734 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14735 }
14736
14737 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14738 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14739 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14740 pVCpu->iem.s.cActiveMappings = 0;
14741 return rcStrict;
14742}
14743
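/*
 * Usage sketch: the ring-3 force-flag dispatch would typically only make this
 * call when IEM has actually flagged a pending write commit.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */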
14744#endif /* IN_RING3 */
14745