VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 62171

Last change on this file since 62171 was 62171, checked in by vboxsync, 8 years ago

IEM: Working on instruction fetching optimizations (incomplete and disabled).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 564.8 KB
1/* $Id: IEMAll.cpp 62171 2016-07-11 18:30:07Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
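/*
 * Illustrative sketch of the level conventions above at a typical call site.
 * The format strings and variables are made up for the example; the Log*,
 * LogFlow macros come from VBox/log.h (included below).
 */
#if 0 /* example only */
LogFlow(("IEMExecOne: cs:rip=%04x:%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip)); /* Flow: enter/exit info. */
Log4(("decode - %04x:%08RX64 nop\n", pCtx->cs.Sel, pCtx->rip));                /* Level 4: mnemonic w/ EIP. */
Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));                               /* Level 8: memory writes. */
Log9(("IEM RD %RGv LB %#x\n", GCPtrMem, cbMem));                               /* Level 9: memory reads. */
#endif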
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84//#define IEM_WITH_CODE_TLB - work in progress
85
86
87/*********************************************************************************************************************************
88* Header Files *
89*********************************************************************************************************************************/
90#define LOG_GROUP LOG_GROUP_IEM
91#define VMCPU_INCL_CPUM_GST_CTX
92#include <VBox/vmm/iem.h>
93#include <VBox/vmm/cpum.h>
94#include <VBox/vmm/pdm.h>
95#include <VBox/vmm/pgm.h>
96#include <internal/pgm.h>
97#include <VBox/vmm/iom.h>
98#include <VBox/vmm/em.h>
99#include <VBox/vmm/hm.h>
100#include <VBox/vmm/tm.h>
101#include <VBox/vmm/dbgf.h>
102#include <VBox/vmm/dbgftrace.h>
103#ifdef VBOX_WITH_RAW_MODE_NOT_R0
104# include <VBox/vmm/patm.h>
105# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
106# include <VBox/vmm/csam.h>
107# endif
108#endif
109#include "IEMInternal.h"
110#ifdef IEM_VERIFICATION_MODE_FULL
111# include <VBox/vmm/rem.h>
112# include <VBox/vmm/mm.h>
113#endif
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
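/*
 * Illustrative sketch: what a decoder function defined with the macros above
 * looks like. The name and body are hypothetical; the real decoders are
 * defined via the same macros elsewhere.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_opcode) /* hypothetical name */
{
    /* ...fetch any ModR/M byte / immediates and carry out the operation... */
    return VINF_SUCCESS;
}
#endif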
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211
212/*********************************************************************************************************************************
213* Defined Constants And Macros *
214*********************************************************************************************************************************/
215/** @def IEM_WITH_SETJMP
216 * Enables alternative status code handling using setjmps.
217 *
218 * This adds a bit of expense via the setjmp() call since it saves all the
219 * non-volatile registers. However, it eliminates return code checks and allows
220 * for more optimal return value passing (return regs instead of stack buffer).
221 */
222#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
223# define IEM_WITH_SETJMP
224#endif
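/*
 * Rough sketch of the pattern IEM_WITH_SETJMP enables: rather than checking a
 * VBOXSTRICTRC after every fetch, the fetch helpers longjmp() straight back to
 * a setjmp() frame on failure. The names below are illustrative only.
 */
#if 0 /* example only */
jmp_buf JmpBuf;                                     /* saved once per invocation (non-volatile regs) */
int rcJmp = setjmp(JmpBuf);
if (rcJmp == 0)
    rcStrict = iemExecuteInstructionViaJmp(pVCpu);  /* hypothetical; helpers longjmp(JmpBuf, rc) on faults */
else
    rcStrict = rcJmp;                               /* status code passed through the longjmp */
#endif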
225
226/** Temporary hack to disable the double execution. Will be removed in favor
227 * of a dedicated execution mode in EM. */
228//#define IEM_VERIFICATION_MODE_NO_REM
229
230/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
231 * due to GCC lacking knowledge about the value range of a switch. */
232#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
233
234/**
235 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
236 * occasion.
237 */
238#ifdef LOG_ENABLED
239# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
240 do { \
241 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
242 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
243 } while (0)
244#else
245# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
246 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
247#endif
248
249/**
250 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
251 * occasion using the supplied logger statement.
252 *
253 * @param a_LoggerArgs What to log on failure.
254 */
255#ifdef LOG_ENABLED
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
257 do { \
258 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
259 /*LogFunc(a_LoggerArgs);*/ \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
261 } while (0)
262#else
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
265#endif
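/*
 * Illustrative sketch of how the two macros above are typically used from an
 * instruction implementation when an aspect isn't handled yet. The condition
 * is made up for the example.
 */
#if 0 /* example only */
if (pCtx->cr4 & X86_CR4_VME) /* hypothetical unhandled case */
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("VME variant not implemented\n"));
IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
#endif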
266
267/**
268 * Call an opcode decoder function.
269 *
270 * We're using macros for this so that adding and removing parameters can be
271 * done as we please. See FNIEMOP_DEF.
272 */
273#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
274
275/**
276 * Call a common opcode decoder function taking one extra argument.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF_1.
280 */
281#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
282
283/**
284 * Call a common opcode decoder function taking two extra arguments.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_2.
288 */
289#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
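/*
 * Illustrative sketch of the dispatch these call macros exist for: the next
 * opcode byte indexes g_apfnOneByteMap (declared below) and the handler is
 * invoked through FNIEMOP_CALL. Simplified; the opcode fetch macro lives
 * elsewhere in this file.
 */
#if 0 /* example only */
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif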
290
291/**
292 * Check if we're currently executing in real or virtual 8086 mode.
293 *
294 * @returns @c true if it is, @c false if not.
295 * @param a_pVCpu The IEM state of the current CPU.
296 */
297#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
298
299/**
300 * Check if we're currently executing in virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
304 */
305#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in long mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in real mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
325 * @returns PCCPUMFEATURES
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
336
337/**
338 * Evaluates to true if we're presenting an Intel CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
341
342/**
343 * Evaluates to true if we're presenting an AMD CPU to the guest.
344 */
345#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
346
347/**
348 * Check if the address is canonical.
349 */
350#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
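/*
 * For reference: a 64-bit address is canonical when bits 63:47 are all equal,
 * i.e. the sign-extension of bit 47 on current implementations. For example,
 * 0x00007fffffffffff and 0xffff800000000000 are canonical, while
 * 0x0000800000000000 is not and will typically raise \#GP(0) when used.
 */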
351
352/** @def IEM_USE_UNALIGNED_DATA_ACCESS
353 * Use unaligned accesses instead of elaborate byte assembly. */
354#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
355# define IEM_USE_UNALIGNED_DATA_ACCESS
356#endif
357
358
359/*********************************************************************************************************************************
360* Global Variables *
361*********************************************************************************************************************************/
362extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
363
364
365/** Function table for the ADD instruction. */
366IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
367{
368 iemAImpl_add_u8, iemAImpl_add_u8_locked,
369 iemAImpl_add_u16, iemAImpl_add_u16_locked,
370 iemAImpl_add_u32, iemAImpl_add_u32_locked,
371 iemAImpl_add_u64, iemAImpl_add_u64_locked
372};
373
374/** Function table for the ADC instruction. */
375IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
376{
377 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
378 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
379 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
380 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
381};
382
383/** Function table for the SUB instruction. */
384IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
385{
386 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
387 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
388 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
389 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
390};
391
392/** Function table for the SBB instruction. */
393IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
394{
395 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
396 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
397 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
398 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
399};
400
401/** Function table for the OR instruction. */
402IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
403{
404 iemAImpl_or_u8, iemAImpl_or_u8_locked,
405 iemAImpl_or_u16, iemAImpl_or_u16_locked,
406 iemAImpl_or_u32, iemAImpl_or_u32_locked,
407 iemAImpl_or_u64, iemAImpl_or_u64_locked
408};
409
410/** Function table for the XOR instruction. */
411IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
412{
413 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
414 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
415 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
416 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
417};
418
419/** Function table for the AND instruction. */
420IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
421{
422 iemAImpl_and_u8, iemAImpl_and_u8_locked,
423 iemAImpl_and_u16, iemAImpl_and_u16_locked,
424 iemAImpl_and_u32, iemAImpl_and_u32_locked,
425 iemAImpl_and_u64, iemAImpl_and_u64_locked
426};
427
428/** Function table for the CMP instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
432{
433 iemAImpl_cmp_u8, NULL,
434 iemAImpl_cmp_u16, NULL,
435 iemAImpl_cmp_u32, NULL,
436 iemAImpl_cmp_u64, NULL
437};
438
439/** Function table for the TEST instruction.
440 * @remarks Making operand order ASSUMPTIONS.
441 */
442IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
443{
444 iemAImpl_test_u8, NULL,
445 iemAImpl_test_u16, NULL,
446 iemAImpl_test_u32, NULL,
447 iemAImpl_test_u64, NULL
448};
449
450/** Function table for the BT instruction. */
451IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
452{
453 NULL, NULL,
454 iemAImpl_bt_u16, NULL,
455 iemAImpl_bt_u32, NULL,
456 iemAImpl_bt_u64, NULL
457};
458
459/** Function table for the BTC instruction. */
460IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
461{
462 NULL, NULL,
463 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
464 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
465 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
466};
467
468/** Function table for the BTR instruction. */
469IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
470{
471 NULL, NULL,
472 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
473 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
474 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
475};
476
477/** Function table for the BTS instruction. */
478IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
479{
480 NULL, NULL,
481 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
482 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
483 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
484};
485
486/** Function table for the BSF instruction. */
487IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
488{
489 NULL, NULL,
490 iemAImpl_bsf_u16, NULL,
491 iemAImpl_bsf_u32, NULL,
492 iemAImpl_bsf_u64, NULL
493};
494
495/** Function table for the BSR instruction. */
496IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
497{
498 NULL, NULL,
499 iemAImpl_bsr_u16, NULL,
500 iemAImpl_bsr_u32, NULL,
501 iemAImpl_bsr_u64, NULL
502};
503
504/** Function table for the IMUL instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
506{
507 NULL, NULL,
508 iemAImpl_imul_two_u16, NULL,
509 iemAImpl_imul_two_u32, NULL,
510 iemAImpl_imul_two_u64, NULL
511};
512
513/** Group 1 /r lookup table. */
514IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
515{
516 &g_iemAImpl_add,
517 &g_iemAImpl_or,
518 &g_iemAImpl_adc,
519 &g_iemAImpl_sbb,
520 &g_iemAImpl_and,
521 &g_iemAImpl_sub,
522 &g_iemAImpl_xor,
523 &g_iemAImpl_cmp
524};
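/*
 * Illustrative sketch of how this table is consumed by the group 1 decoders:
 * the reg field of the ModR/M byte selects the entry, and the LOCK prefix
 * selects the locked worker within it. Variable and member usage below is
 * illustrative, not lifted from the actual decoder code.
 */
#if 0 /* example only */
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
PFNIEMAIMPLBINU32 const pfnU32 = !(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)
                               ? pImpl->pfnNormalU32 : pImpl->pfnLockedU32;
#endif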
525
526/** Function table for the INC instruction. */
527IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
528{
529 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
530 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
531 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
532 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
533};
534
535/** Function table for the DEC instruction. */
536IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
537{
538 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
539 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
540 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
541 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
542};
543
544/** Function table for the NEG instruction. */
545IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
546{
547 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
548 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
549 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
550 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
551};
552
553/** Function table for the NOT instruction. */
554IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
555{
556 iemAImpl_not_u8, iemAImpl_not_u8_locked,
557 iemAImpl_not_u16, iemAImpl_not_u16_locked,
558 iemAImpl_not_u32, iemAImpl_not_u32_locked,
559 iemAImpl_not_u64, iemAImpl_not_u64_locked
560};
561
562
563/** Function table for the ROL instruction. */
564IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
565{
566 iemAImpl_rol_u8,
567 iemAImpl_rol_u16,
568 iemAImpl_rol_u32,
569 iemAImpl_rol_u64
570};
571
572/** Function table for the ROR instruction. */
573IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
574{
575 iemAImpl_ror_u8,
576 iemAImpl_ror_u16,
577 iemAImpl_ror_u32,
578 iemAImpl_ror_u64
579};
580
581/** Function table for the RCL instruction. */
582IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
583{
584 iemAImpl_rcl_u8,
585 iemAImpl_rcl_u16,
586 iemAImpl_rcl_u32,
587 iemAImpl_rcl_u64
588};
589
590/** Function table for the RCR instruction. */
591IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
592{
593 iemAImpl_rcr_u8,
594 iemAImpl_rcr_u16,
595 iemAImpl_rcr_u32,
596 iemAImpl_rcr_u64
597};
598
599/** Function table for the SHL instruction. */
600IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
601{
602 iemAImpl_shl_u8,
603 iemAImpl_shl_u16,
604 iemAImpl_shl_u32,
605 iemAImpl_shl_u64
606};
607
608/** Function table for the SHR instruction. */
609IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
610{
611 iemAImpl_shr_u8,
612 iemAImpl_shr_u16,
613 iemAImpl_shr_u32,
614 iemAImpl_shr_u64
615};
616
617/** Function table for the SAR instruction. */
618IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
619{
620 iemAImpl_sar_u8,
621 iemAImpl_sar_u16,
622 iemAImpl_sar_u32,
623 iemAImpl_sar_u64
624};
625
626
627/** Function table for the MUL instruction. */
628IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
629{
630 iemAImpl_mul_u8,
631 iemAImpl_mul_u16,
632 iemAImpl_mul_u32,
633 iemAImpl_mul_u64
634};
635
636/** Function table for the IMUL instruction working implicitly on rAX. */
637IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
638{
639 iemAImpl_imul_u8,
640 iemAImpl_imul_u16,
641 iemAImpl_imul_u32,
642 iemAImpl_imul_u64
643};
644
645/** Function table for the DIV instruction. */
646IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
647{
648 iemAImpl_div_u8,
649 iemAImpl_div_u16,
650 iemAImpl_div_u32,
651 iemAImpl_div_u64
652};
653
654/** Function table for the IDIV instruction. */
655IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
656{
657 iemAImpl_idiv_u8,
658 iemAImpl_idiv_u16,
659 iemAImpl_idiv_u32,
660 iemAImpl_idiv_u64
661};
662
663/** Function table for the SHLD instruction */
664IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
665{
666 iemAImpl_shld_u16,
667 iemAImpl_shld_u32,
668 iemAImpl_shld_u64,
669};
670
671/** Function table for the SHRD instruction */
672IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
673{
674 iemAImpl_shrd_u16,
675 iemAImpl_shrd_u32,
676 iemAImpl_shrd_u64,
677};
678
679
680/** Function table for the PUNPCKLBW instruction */
681IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
682/** Function table for the PUNPCKLWD instruction */
683IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
684/** Function table for the PUNPCKLDQ instruction */
685IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
686/** Function table for the PUNPCKLQDQ instruction */
687IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
688
689/** Function table for the PUNPCKHBW instruction */
690IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
691/** Function table for the PUNPCKHWD instruction */
692IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
693/** Function table for the PUNPCKHDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
695/** Function table for the PUNPCKHQDQ instruction */
696IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
697
698/** Function table for the PXOR instruction */
699IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
700/** Function table for the PCMPEQB instruction */
701IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
702/** Function table for the PCMPEQW instruction */
703IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
704/** Function table for the PCMPEQD instruction */
705IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
706
707
708#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
709/** What IEM just wrote. */
710uint8_t g_abIemWrote[256];
711/** How much IEM just wrote. */
712size_t g_cbIemWrote;
713#endif
714
715
716/*********************************************************************************************************************************
717* Internal Functions *
718*********************************************************************************************************************************/
719IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
720IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
721IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
722IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
723/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
724IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
725IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
726IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
727IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
728IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
729IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
730IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
733IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
734IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
735IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
736#ifdef IEM_WITH_SETJMP
737DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
740DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741#endif
742
743IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
744IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
745IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
747IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
748IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
750IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
751IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
752IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
753IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
754IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
755IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
756IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
757IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
758IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
759
760#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
761IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
762#endif
763IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
764IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
765
766
767
768/**
769 * Sets the pass up status.
770 *
771 * @returns VINF_SUCCESS.
772 * @param pVCpu The cross context virtual CPU structure of the
773 * calling thread.
774 * @param rcPassUp The pass up status. Must be informational.
775 * VINF_SUCCESS is not allowed.
776 */
777IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
778{
779 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
780
781 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
782 if (rcOldPassUp == VINF_SUCCESS)
783 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
784 /* If both are EM scheduling codes, use EM priority rules. */
785 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
786 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
787 {
788 if (rcPassUp < rcOldPassUp)
789 {
790 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 }
793 else
794 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
795 }
796 /* Override EM scheduling with specific status code. */
797 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
798 {
799 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
800 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
801 }
802 /* Don't override specific status code, first come first served. */
803 else
804 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
805 return VINF_SUCCESS;
806}
807
808
809/**
810 * Calculates the CPU mode.
811 *
812 * This is mainly for updating IEMCPU::enmCpuMode.
813 *
814 * @returns CPU mode.
815 * @param pCtx The register context for the CPU.
816 */
817DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
818{
819 if (CPUMIsGuestIn64BitCodeEx(pCtx))
820 return IEMMODE_64BIT;
821 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
822 return IEMMODE_32BIT;
823 return IEMMODE_16BIT;
824}
825
826
827/**
828 * Initializes the execution state.
829 *
830 * @param pVCpu The cross context virtual CPU structure of the
831 * calling thread.
832 * @param fBypassHandlers Whether to bypass access handlers.
833 *
834 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
835 * side-effects in strict builds.
836 */
837DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
838{
839 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
840
841 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
842
843#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
852#endif
853
854#ifdef VBOX_WITH_RAW_MODE_NOT_R0
855 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
856#endif
857 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
858 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
859#ifdef VBOX_STRICT
860 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
861 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
862 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
863 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
864 pVCpu->iem.s.fPrefixes = (IEMMODE)0xfeedbeef;
865 pVCpu->iem.s.uRexReg = 127;
866 pVCpu->iem.s.uRexB = 127;
867 pVCpu->iem.s.uRexIndex = 127;
868 pVCpu->iem.s.iEffSeg = 127;
869 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
870# ifdef IEM_WITH_CODE_TLB
871 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
872 pVCpu->iem.s.pbInstrBuf = NULL;
873 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
874 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
875 pVCpu->iem.s.offCurInstrStart = UINT16_MAX;
876 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
877# else
878 pVCpu->iem.s.offOpcode = 127;
879 pVCpu->iem.s.cbOpcode = 127;
880# endif
881#endif
882
883 pVCpu->iem.s.cActiveMappings = 0;
884 pVCpu->iem.s.iNextMapping = 0;
885 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
886 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
887#ifdef VBOX_WITH_RAW_MODE_NOT_R0
888 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
889 && pCtx->cs.u64Base == 0
890 && pCtx->cs.u32Limit == UINT32_MAX
891 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
892 if (!pVCpu->iem.s.fInPatchCode)
893 CPUMRawLeave(pVCpu, VINF_SUCCESS);
894#endif
895
896#ifdef IEM_VERIFICATION_MODE_FULL
897 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
898 pVCpu->iem.s.fNoRem = true;
899#endif
900}
901
902
903/**
904 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
905 *
906 * @param pVCpu The cross context virtual CPU structure of the
907 * calling thread.
908 */
909DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
910{
911 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
912#ifdef IEM_VERIFICATION_MODE_FULL
913 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
914#endif
915#ifdef VBOX_STRICT
916# ifdef IEM_WITH_CODE_TLB
917# else
918 pVCpu->iem.s.cbOpcode = 0;
919# endif
920#else
921 NOREF(pVCpu);
922#endif
923}
924
925
926/**
927 * Initializes the decoder state.
928 *
929 * iemReInitDecoder is mostly a copy of this function.
930 *
931 * @param pVCpu The cross context virtual CPU structure of the
932 * calling thread.
933 * @param fBypassHandlers Whether to bypass access handlers.
934 */
935DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
936{
937 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
938
939 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
940
941#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
950#endif
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
954#endif
955 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
956#ifdef IEM_VERIFICATION_MODE_FULL
957 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
958 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
959#endif
960 IEMMODE enmMode = iemCalcCpuMode(pCtx);
961 pVCpu->iem.s.enmCpuMode = enmMode;
962 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
963 pVCpu->iem.s.enmEffAddrMode = enmMode;
964 if (enmMode != IEMMODE_64BIT)
965 {
966 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
967 pVCpu->iem.s.enmEffOpSize = enmMode;
968 }
969 else
970 {
971 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
972 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
973 }
974 pVCpu->iem.s.fPrefixes = 0;
975 pVCpu->iem.s.uRexReg = 0;
976 pVCpu->iem.s.uRexB = 0;
977 pVCpu->iem.s.uRexIndex = 0;
978 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
979#ifdef IEM_WITH_CODE_TLB
980 pVCpu->iem.s.pbInstrBuf = NULL;
981 pVCpu->iem.s.offInstrNextByte = 0;
982 pVCpu->iem.s.offCurInstrStart = 0;
983# ifdef VBOX_STRICT
984 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
985 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
986 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
987# endif
988#else
989 pVCpu->iem.s.offOpcode = 0;
990 pVCpu->iem.s.cbOpcode = 0;
991#endif
992 pVCpu->iem.s.cActiveMappings = 0;
993 pVCpu->iem.s.iNextMapping = 0;
994 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
995 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
996#ifdef VBOX_WITH_RAW_MODE_NOT_R0
997 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
998 && pCtx->cs.u64Base == 0
999 && pCtx->cs.u32Limit == UINT32_MAX
1000 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1001 if (!pVCpu->iem.s.fInPatchCode)
1002 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1003#endif
1004
1005#ifdef DBGFTRACE_ENABLED
1006 switch (enmMode)
1007 {
1008 case IEMMODE_64BIT:
1009 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1010 break;
1011 case IEMMODE_32BIT:
1012 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1013 break;
1014 case IEMMODE_16BIT:
1015 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1016 break;
1017 }
1018#endif
1019}
1020
1021
1022/**
1023 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1024 *
1025 * This is mostly a copy of iemInitDecoder.
1026 *
1027 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1028 */
1029DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1030{
1031 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1032
1033 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1034
1035#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1043 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1044#endif
1045
1046 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1047#ifdef IEM_VERIFICATION_MODE_FULL
1048 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1049 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1050#endif
1051 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1052 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1053 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1054 pVCpu->iem.s.enmEffAddrMode = enmMode;
1055 if (enmMode != IEMMODE_64BIT)
1056 {
1057 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1058 pVCpu->iem.s.enmEffOpSize = enmMode;
1059 }
1060 else
1061 {
1062 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1063 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1064 }
1065 pVCpu->iem.s.fPrefixes = 0;
1066 pVCpu->iem.s.uRexReg = 0;
1067 pVCpu->iem.s.uRexB = 0;
1068 pVCpu->iem.s.uRexIndex = 0;
1069 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1070#ifdef IEM_WITH_CODE_TLB
1071 if (pVCpu->iem.s.pbInstrBuf)
1072 {
1073 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1074 - pVCpu->iem.s.uInstrBufPc;
1075 if (off < pVCpu->iem.s.cbInstrBufTotal)
1076 {
1077 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1078 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1079 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1080 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1081 else
1082 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1083 }
1084 else
1085 {
1086 pVCpu->iem.s.pbInstrBuf = NULL;
1087 pVCpu->iem.s.offInstrNextByte = 0;
1088 pVCpu->iem.s.offCurInstrStart = 0;
1089 }
1090 }
1091 else
1092 {
1093 pVCpu->iem.s.offInstrNextByte = 0;
1094 pVCpu->iem.s.offCurInstrStart = 0;
1095 }
1096#else
1097 pVCpu->iem.s.cbOpcode = 0;
1098 pVCpu->iem.s.offOpcode = 0;
1099#endif
1100 Assert(pVCpu->iem.s.cActiveMappings == 0);
1101 pVCpu->iem.s.iNextMapping = 0;
1102 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1103 Assert(pVCpu->iem.s.fBypassHandlers == false);
1104#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1105 if (!pVCpu->iem.s.fInPatchCode)
1106 { /* likely */ }
1107 else
1108 {
1109 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1110 && pCtx->cs.u64Base == 0
1111 && pCtx->cs.u32Limit == UINT32_MAX
1112 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1113 if (!pVCpu->iem.s.fInPatchCode)
1114 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1115 }
1116#endif
1117
1118#ifdef DBGFTRACE_ENABLED
1119 switch (enmMode)
1120 {
1121 case IEMMODE_64BIT:
1122 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1123 break;
1124 case IEMMODE_32BIT:
1125 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1126 break;
1127 case IEMMODE_16BIT:
1128 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1129 break;
1130 }
1131#endif
1132}
1133
1134
1135
1136/**
1137 * Prefetch opcodes the first time when starting execution.
1138 *
1139 * @returns Strict VBox status code.
1140 * @param pVCpu The cross context virtual CPU structure of the
1141 * calling thread.
1142 * @param fBypassHandlers Whether to bypass access handlers.
1143 */
1144IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1145{
1146#ifdef IEM_VERIFICATION_MODE_FULL
1147 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1148#endif
1149 iemInitDecoder(pVCpu, fBypassHandlers);
1150
1151#ifdef IEM_WITH_CODE_TLB
1152 /** @todo Do ITLB lookup here. */
1153
1154#else /* !IEM_WITH_CODE_TLB */
1155
1156 /*
1157 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1158 *
1159 * First translate CS:rIP to a physical address.
1160 */
1161 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1162 uint32_t cbToTryRead;
1163 RTGCPTR GCPtrPC;
1164 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1165 {
1166 cbToTryRead = PAGE_SIZE;
1167 GCPtrPC = pCtx->rip;
1168 if (!IEM_IS_CANONICAL(GCPtrPC))
1169 return iemRaiseGeneralProtectionFault0(pVCpu);
1170 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1171 }
1172 else
1173 {
1174 uint32_t GCPtrPC32 = pCtx->eip;
1175 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1176 if (GCPtrPC32 > pCtx->cs.u32Limit)
1177 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1178 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1179 if (!cbToTryRead) /* overflowed */
1180 {
1181 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1182 cbToTryRead = UINT32_MAX;
1183 }
1184 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1185 Assert(GCPtrPC <= UINT32_MAX);
1186 }
1187
1188# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1189 /* Allow interpretation of patch manager code blocks since they can for
1190 instance throw #PFs for perfectly good reasons. */
1191 if (pVCpu->iem.s.fInPatchCode)
1192 {
1193 size_t cbRead = 0;
1194 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1195 AssertRCReturn(rc, rc);
1196 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1197 return VINF_SUCCESS;
1198 }
1199# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1200
1201 RTGCPHYS GCPhys;
1202 uint64_t fFlags;
1203 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1204 if (RT_FAILURE(rc))
1205 {
1206 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1207 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1210 {
1211 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1212 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1213 }
1214 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1215 {
1216 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1217 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1218 }
1219 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1220 /** @todo Check reserved bits and such stuff. PGM is better at doing
1221 * that, so do it when implementing the guest virtual address
1222 * TLB... */
1223
1224# ifdef IEM_VERIFICATION_MODE_FULL
1225 /*
1226 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1227 * instruction.
1228 */
1229 /** @todo optimize this differently by not using PGMPhysRead. */
1230 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1231 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1232 if ( offPrevOpcodes < cbOldOpcodes
1233 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1234 {
1235 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1236 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1237 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1238 pVCpu->iem.s.cbOpcode = cbNew;
1239 return VINF_SUCCESS;
1240 }
1241# endif
1242
1243 /*
1244 * Read the bytes at this address.
1245 */
1246 PVM pVM = pVCpu->CTX_SUFF(pVM);
1247# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1248 size_t cbActual;
1249 if ( PATMIsEnabled(pVM)
1250 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1251 {
1252 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1253 Assert(cbActual > 0);
1254 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1255 }
1256 else
1257# endif
1258 {
1259 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1260 if (cbToTryRead > cbLeftOnPage)
1261 cbToTryRead = cbLeftOnPage;
1262 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1263 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1264
1265 if (!pVCpu->iem.s.fBypassHandlers)
1266 {
1267 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1268 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1269 { /* likely */ }
1270 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1271 {
1272 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1273 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1274 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1275 }
1276 else
1277 {
1278 Log((RT_SUCCESS(rcStrict)
1279 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1280 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1281 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1282 return rcStrict;
1283 }
1284 }
1285 else
1286 {
1287 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1288 if (RT_SUCCESS(rc))
1289 { /* likely */ }
1290 else
1291 {
1292 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1293 GCPtrPC, GCPhys, cbToTryRead, rc));
1294 return rc;
1295 }
1296 }
1297 pVCpu->iem.s.cbOpcode = cbToTryRead;
1298 }
1299#endif /* !IEM_WITH_CODE_TLB */
1300 return VINF_SUCCESS;
1301}
1302
1303
1304#ifdef IEM_WITH_CODE_TLB
1305
1306/**
1307 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1308 * failure and jumps.
1309 *
1310 * We end up here for a number of reasons:
1311 * - pbInstrBuf isn't yet initialized.
1312 * - Advancing beyond the buffer boundary (e.g. cross page).
1313 * - Advancing beyond the CS segment limit.
1314 * - Fetching from non-mappable page (e.g. MMIO).
1315 *
1316 * @param pVCpu The cross context virtual CPU structure of the
1317 * calling thread.
1318 * @param pvDst Where to return the bytes.
1319 * @param cbDst Number of bytes to read.
1320 *
1321 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1322 */
1323IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1324{
1325 Assert(cbDst <= 8);
1326 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1327
1328 /*
1329 * We might have a partial buffer match; deal with that first to make the
1330 * rest simpler. This is the first part of the cross page/buffer case.
1331 */
1332 if (pVCpu->iem.s.pbInstrBuf != NULL)
1333 {
1334 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1335 {
1336 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1337 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1338 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1339
1340 cbDst -= cbCopy;
1341 pvDst = (uint8_t *)pvDst + cbCopy;
1342 offBuf += cbCopy;
1343 pVCpu->iem.s.offInstrNextByte += offBuf;
1344 }
1345 }
1346
1347 /*
1348 * Check segment limit, figuring how much we're allowed to access at this point.
1349 */
1350 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1351 RTGCPTR GCPtrFirst;
1352 uint32_t cbMaxRead;
1353 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1354 {
1355 GCPtrFirst = pCtx->rip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1356 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1357 { /* likely */ }
1358 else
1359 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1360 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1361 }
1362 else
1363 {
1364 GCPtrFirst = pCtx->eip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1365 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1366 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1367 { /* likely */ }
1368 else
1369 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1370 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1371 if (cbMaxRead != 0)
1372 { /* likely */ }
1373 else
1374 {
1375 /* Overflowed because address is 0 and limit is max. */
1376 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1377 cbMaxRead = X86_PAGE_SIZE;
1378 }
1379 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1380 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1381 if (cbMaxRead2 < cbMaxRead)
1382 cbMaxRead = cbMaxRead2;
1383 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1384 }
1385
1386 /*
1387 * Try use the code TLB to translate the address.
1388 */
1389 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1390 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1391 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1392 if (pTlbe->uTag == uTag)
1393 {
1394
1395 }
1396
1397
1398
1399 /*
1400 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1401 *
1402 * First translate CS:rIP to a physical address.
1403 */
1404# if 0 /** @todo later */
1405 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1406 uint32_t cbToTryRead;
1407 RTGCPTR GCPtrNext;
1408 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1409 {
1410 cbToTryRead = PAGE_SIZE;
1411 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1412 if (!IEM_IS_CANONICAL(GCPtrNext))
1413 return iemRaiseGeneralProtectionFault0(pVCpu);
1414 }
1415 else
1416 {
1417 uint32_t GCPtrNext32 = pCtx->eip;
1418 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1419 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1420 if (GCPtrNext32 > pCtx->cs.u32Limit)
1421 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1422 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1423 if (!cbToTryRead) /* overflowed */
1424 {
1425 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1426 cbToTryRead = UINT32_MAX;
1427 /** @todo check out wrapping around the code segment. */
1428 }
1429 if (cbToTryRead < cbMin - cbLeft)
1430 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1431 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1432 }
1433
1434 /* Only read up to the end of the page, and make sure we don't read more
1435 than the opcode buffer can hold. */
1436 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1437 if (cbToTryRead > cbLeftOnPage)
1438 cbToTryRead = cbLeftOnPage;
1439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1441/** @todo r=bird: Convert assertion into undefined opcode exception? */
1442 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1443
1444# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1445 /* Allow interpretation of patch manager code blocks since they can for
1446 instance throw #PFs for perfectly good reasons. */
1447 if (pVCpu->iem.s.fInPatchCode)
1448 {
1449 size_t cbRead = 0;
1450 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1451 AssertRCReturn(rc, rc);
1452 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1453 return VINF_SUCCESS;
1454 }
1455# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1456
1457 RTGCPHYS GCPhys;
1458 uint64_t fFlags;
1459 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1460 if (RT_FAILURE(rc))
1461 {
1462 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1463 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1464 }
1465 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1466 {
1467 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1468 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1469 }
1470 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1471 {
1472 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1473 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1474 }
1475 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1476 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1477 /** @todo Check reserved bits and such stuff. PGM is better at doing
1478 * that, so do it when implementing the guest virtual address
1479 * TLB... */
1480
1481 /*
1482 * Read the bytes at this address.
1483 *
1484 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1485 * and since PATM should only patch the start of an instruction there
1486 * should be no need to check again here.
1487 */
1488 if (!pVCpu->iem.s.fBypassHandlers)
1489 {
1490 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1491 cbToTryRead, PGMACCESSORIGIN_IEM);
1492 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1493 { /* likely */ }
1494 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1495 {
1496 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1497                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1498 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1499 }
1500 else
1501 {
1502 Log((RT_SUCCESS(rcStrict)
1503 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1504 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1505                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1506 return rcStrict;
1507 }
1508 }
1509 else
1510 {
1511 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1512 if (RT_SUCCESS(rc))
1513 { /* likely */ }
1514 else
1515 {
1516 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1517 return rc;
1518 }
1519 }
1520 pVCpu->iem.s.cbOpcode += cbToTryRead;
1521 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1522# endif
1523}
1524
1525#else
1526
1527/**
1528 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1529 * exception if it fails.
1530 *
1531 * @returns Strict VBox status code.
1532 * @param pVCpu The cross context virtual CPU structure of the
1533 * calling thread.
1534 * @param cbMin The minimum number of bytes relative to offOpcode
1535 * that must be read.
1536 */
1537IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1538{
1539 /*
1540 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1541 *
1542 * First translate CS:rIP to a physical address.
1543 */
1544 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1545 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1546 uint32_t cbToTryRead;
1547 RTGCPTR GCPtrNext;
1548 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1549 {
1550 cbToTryRead = PAGE_SIZE;
1551 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1552 if (!IEM_IS_CANONICAL(GCPtrNext))
1553 return iemRaiseGeneralProtectionFault0(pVCpu);
1554 }
1555 else
1556 {
1557 uint32_t GCPtrNext32 = pCtx->eip;
1558 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1559 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1560 if (GCPtrNext32 > pCtx->cs.u32Limit)
1561 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1562 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1563 if (!cbToTryRead) /* overflowed */
1564 {
1565 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1566 cbToTryRead = UINT32_MAX;
1567 /** @todo check out wrapping around the code segment. */
1568 }
1569 if (cbToTryRead < cbMin - cbLeft)
1570 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1571 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1572 }
1573
1574 /* Only read up to the end of the page, and make sure we don't read more
1575 than the opcode buffer can hold. */
1576 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1577 if (cbToTryRead > cbLeftOnPage)
1578 cbToTryRead = cbLeftOnPage;
1579 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1580 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1581/** @todo r=bird: Convert assertion into undefined opcode exception? */
1582 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
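    /* Example with illustrative numbers: if GCPtrNext is 10 bytes short of the
       next page boundary and cbOpcode bytes are already buffered, the two
       clamps above limit cbToTryRead to
       RT_MIN(10, sizeof(pVCpu->iem.s.abOpcode) - cbOpcode) for this read. */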
1583
1584# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1585 /* Allow interpretation of patch manager code blocks since they can for
1586 instance throw #PFs for perfectly good reasons. */
1587 if (pVCpu->iem.s.fInPatchCode)
1588 {
1589 size_t cbRead = 0;
1590 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1591 AssertRCReturn(rc, rc);
1592 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1593 return VINF_SUCCESS;
1594 }
1595# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1596
1597 RTGCPHYS GCPhys;
1598 uint64_t fFlags;
1599 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1600 if (RT_FAILURE(rc))
1601 {
1602 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1603 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1604 }
1605 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1606 {
1607 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1608 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1609 }
1610 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1611 {
1612 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1613 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1614 }
1615 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1616 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1617 /** @todo Check reserved bits and such stuff. PGM is better at doing
1618 * that, so do it when implementing the guest virtual address
1619 * TLB... */
1620
1621 /*
1622 * Read the bytes at this address.
1623 *
1624 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1625 * and since PATM should only patch the start of an instruction there
1626 * should be no need to check again here.
1627 */
1628 if (!pVCpu->iem.s.fBypassHandlers)
1629 {
1630 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1631 cbToTryRead, PGMACCESSORIGIN_IEM);
1632 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1633 { /* likely */ }
1634 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1635 {
1636 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1637                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1638 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1639 }
1640 else
1641 {
1642 Log((RT_SUCCESS(rcStrict)
1643 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1644 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1645                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1646 return rcStrict;
1647 }
1648 }
1649 else
1650 {
1651 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1652 if (RT_SUCCESS(rc))
1653 { /* likely */ }
1654 else
1655 {
1656 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1657 return rc;
1658 }
1659 }
1660 pVCpu->iem.s.cbOpcode += cbToTryRead;
1661 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1662
1663 return VINF_SUCCESS;
1664}
1665
1666#endif /* !IEM_WITH_CODE_TLB */
1667#ifndef IEM_WITH_SETJMP
1668
1669/**
1670 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1671 *
1672 * @returns Strict VBox status code.
1673 * @param pVCpu The cross context virtual CPU structure of the
1674 * calling thread.
1675 * @param pb Where to return the opcode byte.
1676 */
1677DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1678{
1679 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1680 if (rcStrict == VINF_SUCCESS)
1681 {
1682 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1683 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1684 pVCpu->iem.s.offOpcode = offOpcode + 1;
1685 }
1686 else
1687 *pb = 0;
1688 return rcStrict;
1689}
1690
1691
1692/**
1693 * Fetches the next opcode byte.
1694 *
1695 * @returns Strict VBox status code.
1696 * @param pVCpu The cross context virtual CPU structure of the
1697 * calling thread.
1698 * @param pu8 Where to return the opcode byte.
1699 */
1700DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1701{
1702 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1703 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1704 {
1705 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1706 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1707 return VINF_SUCCESS;
1708 }
1709 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1710}
1711
1712#else /* IEM_WITH_SETJMP */
1713
1714/**
1715 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1716 *
1717 * @returns The opcode byte.
1718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1719 */
1720DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1721{
1722# ifdef IEM_WITH_CODE_TLB
1723 uint8_t u8;
1724 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1725 return u8;
1726# else
1727 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1728 if (rcStrict == VINF_SUCCESS)
1729 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1730 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1731# endif
1732}
1733
1734
1735/**
1736 * Fetches the next opcode byte, longjmp on error.
1737 *
1738 * @returns The opcode byte.
1739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1740 */
1741DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1742{
1743# ifdef IEM_WITH_CODE_TLB
1744 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1745 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1746 if (RT_LIKELY( pbBuf != NULL
1747 && offBuf < pVCpu->iem.s.cbInstrBuf))
1748 {
1749 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1750 return pbBuf[offBuf];
1751 }
1752# else
1753 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1754 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1755 {
1756 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1757 return pVCpu->iem.s.abOpcode[offOpcode];
1758 }
1759# endif
1760 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1761}
1762
1763#endif /* IEM_WITH_SETJMP */
1764
1765/**
1766 * Fetches the next opcode byte, returns automatically on failure.
1767 *
1768 * @param a_pu8 Where to return the opcode byte.
1769 * @remark Implicitly references pVCpu.
1770 */
1771#ifndef IEM_WITH_SETJMP
1772# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1773 do \
1774 { \
1775 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
1776 if (rcStrict2 == VINF_SUCCESS) \
1777 { /* likely */ } \
1778 else \
1779 return rcStrict2; \
1780 } while (0)
1781#else
1782# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
1783#endif /* IEM_WITH_SETJMP */
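/*
 * Usage sketch for IEM_OPCODE_GET_NEXT_U8 (illustrative only; the local name
 * bRm is hypothetical and the real decoder functions live elsewhere in IEM):
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 *
 * In the non-setjmp build the macro may execute a 'return rcStrict2;', so it
 * can only be used in functions returning a strict status code; in the
 * setjmp build a failing fetch instead longjmps out via the jump buffer (see
 * iemOpcodeGetNextU8SlowJmp above).
 */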
1784
1785
1786#ifndef IEM_WITH_SETJMP
1787/**
1788 * Fetches the next signed byte from the opcode stream.
1789 *
1790 * @returns Strict VBox status code.
1791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1792 * @param pi8 Where to return the signed byte.
1793 */
1794DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
1795{
1796 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
1797}
1798#endif /* !IEM_WITH_SETJMP */
1799
1800
1801/**
1802 * Fetches the next signed byte from the opcode stream, returning automatically
1803 * on failure.
1804 *
1805 * @param a_pi8 Where to return the signed byte.
1806 * @remark Implicitly references pVCpu.
1807 */
1808#ifndef IEM_WITH_SETJMP
1809# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1810 do \
1811 { \
1812 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
1813 if (rcStrict2 != VINF_SUCCESS) \
1814 return rcStrict2; \
1815 } while (0)
1816#else /* IEM_WITH_SETJMP */
1817# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1818
1819#endif /* IEM_WITH_SETJMP */
1820
1821#ifndef IEM_WITH_SETJMP
1822
1823/**
1824 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1825 *
1826 * @returns Strict VBox status code.
1827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1828 * @param pu16 Where to return the unsigned word.
1829 */
1830DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
1831{
1832 uint8_t u8;
1833 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1834 if (rcStrict == VINF_SUCCESS)
1835 *pu16 = (int8_t)u8;
1836 return rcStrict;
1837}
1838
1839
1840/**
1841 * Fetches the next signed byte from the opcode stream, sign extending it to
1842 * an unsigned 16-bit value.
1843 *
1844 * @returns Strict VBox status code.
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 * @param pu16 Where to return the unsigned word.
1847 */
1848DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
1849{
1850 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1851 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
1852 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
1853
1854 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
1855 pVCpu->iem.s.offOpcode = offOpcode + 1;
1856 return VINF_SUCCESS;
1857}
1858
1859#endif /* !IEM_WITH_SETJMP */
1860
1861/**
1862 * Fetches the next signed byte from the opcode stream, sign-extending it to
1863 * a word, returning automatically on failure.
1864 *
1865 * @param a_pu16 Where to return the word.
1866 * @remark Implicitly references pVCpu.
1867 */
1868#ifndef IEM_WITH_SETJMP
1869# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1870 do \
1871 { \
1872 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
1873 if (rcStrict2 != VINF_SUCCESS) \
1874 return rcStrict2; \
1875 } while (0)
1876#else
1877# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1878#endif
1879
1880#ifndef IEM_WITH_SETJMP
1881
1882/**
1883 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1884 *
1885 * @returns Strict VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1887 * @param pu32 Where to return the opcode dword.
1888 */
1889DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
1890{
1891 uint8_t u8;
1892 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1893 if (rcStrict == VINF_SUCCESS)
1894 *pu32 = (int8_t)u8;
1895 return rcStrict;
1896}
1897
1898
1899/**
1900 * Fetches the next signed byte from the opcode stream, sign extending it to
1901 * an unsigned 32-bit value.
1902 *
1903 * @returns Strict VBox status code.
1904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1905 * @param pu32 Where to return the unsigned dword.
1906 */
1907DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
1908{
1909 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1910 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
1911 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
1912
1913 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
1914 pVCpu->iem.s.offOpcode = offOpcode + 1;
1915 return VINF_SUCCESS;
1916}
1917
1918#endif /* !IEM_WITH_SETJMP */
1919
1920/**
1921 * Fetches the next signed byte from the opcode stream, sign-extending it to
1922 * a double word, returning automatically on failure.
1923 *
1924 * @param a_pu32 Where to return the double word.
1925 * @remark Implicitly references pVCpu.
1926 */
1927#ifndef IEM_WITH_SETJMP
1928#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1929 do \
1930 { \
1931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
1932 if (rcStrict2 != VINF_SUCCESS) \
1933 return rcStrict2; \
1934 } while (0)
1935#else
1936# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1937#endif
1938
1939#ifndef IEM_WITH_SETJMP
1940
1941/**
1942 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1943 *
1944 * @returns Strict VBox status code.
1945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1946 * @param pu64 Where to return the opcode qword.
1947 */
1948DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
1949{
1950 uint8_t u8;
1951 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1952 if (rcStrict == VINF_SUCCESS)
1953 *pu64 = (int8_t)u8;
1954 return rcStrict;
1955}
1956
1957
1958/**
1959 * Fetches the next signed byte from the opcode stream, sign extending it to
1960 * an unsigned 64-bit value.
1961 *
1962 * @returns Strict VBox status code.
1963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1964 * @param pu64 Where to return the unsigned qword.
1965 */
1966DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
1967{
1968 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1969 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
1970 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
1971
1972 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
1973 pVCpu->iem.s.offOpcode = offOpcode + 1;
1974 return VINF_SUCCESS;
1975}
1976
1977#endif /* !IEM_WITH_SETJMP */
1978
1979
1980/**
1981 * Fetches the next signed byte from the opcode stream, sign-extending it to
1982 * a quad word, returning automatically on failure.
1983 *
1984 * @param a_pu64 Where to return the quad word.
1985 * @remark Implicitly references pVCpu.
1986 */
1987#ifndef IEM_WITH_SETJMP
1988# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1989 do \
1990 { \
1991 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
1992 if (rcStrict2 != VINF_SUCCESS) \
1993 return rcStrict2; \
1994 } while (0)
1995#else
1996# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1997#endif
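/*
 * Illustration (not from the original source): the S8_SX_* fetchers above all
 * read a single opcode byte and sign extend it, so a byte of 0xF0 (-16) comes
 * back as 0xFFF0, 0xFFFFFFF0 or 0xFFFFFFFFFFFFFFF0 depending on the variant;
 * this is the pattern used for 8-bit immediates and displacements applied to
 * wider operands.
 */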
1998
1999
2000#ifndef IEM_WITH_SETJMP
2001
2002/**
2003 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2004 *
2005 * @returns Strict VBox status code.
2006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2007 * @param pu16 Where to return the opcode word.
2008 */
2009DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2010{
2011 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2012 if (rcStrict == VINF_SUCCESS)
2013 {
2014 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2015# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2016 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2017# else
2018 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2019# endif
2020 pVCpu->iem.s.offOpcode = offOpcode + 2;
2021 }
2022 else
2023 *pu16 = 0;
2024 return rcStrict;
2025}
2026
2027
2028/**
2029 * Fetches the next opcode word.
2030 *
2031 * @returns Strict VBox status code.
2032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2033 * @param pu16 Where to return the opcode word.
2034 */
2035DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2036{
2037 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2038 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2039 {
2040 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2041# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2042 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2043# else
2044 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2045# endif
2046 return VINF_SUCCESS;
2047 }
2048 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2049}
2050
2051#else /* IEM_WITH_SETJMP */
2052
2053/**
2054 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2055 *
2056 * @returns The opcode word.
2057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2058 */
2059DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2060{
2061# ifdef IEM_WITH_CODE_TLB
2062 uint16_t u16;
2063 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2064 return u16;
2065# else
2066 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2067 if (rcStrict == VINF_SUCCESS)
2068 {
2069 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2070 pVCpu->iem.s.offOpcode += 2;
2071# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2072 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2073# else
2074 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2075# endif
2076 }
2077 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2078# endif
2079}
2080
2081
2082/**
2083 * Fetches the next opcode word, longjmp on error.
2084 *
2085 * @returns The opcode word.
2086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2087 */
2088DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2089{
2090# ifdef IEM_WITH_CODE_TLB
2091 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2092 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2093 if (RT_LIKELY( pbBuf != NULL
2094 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2095 {
2096 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2097# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2098 return *(uint16_t const *)&pbBuf[offBuf];
2099# else
2100 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2101# endif
2102 }
2103# else
2104 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2105 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2106 {
2107 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2108# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2109 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2110# else
2111 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2112# endif
2113 }
2114# endif
2115 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2116}
2117
2118#endif /* IEM_WITH_SETJMP */
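/*
 * Note on byte order: the RT_MAKE_U16 path above (and the RT_MAKE_U32_FROM_U8
 * and RT_MAKE_U64_FROM_U8 paths in the wider fetchers below) assembles the
 * opcode bytes least significant byte first, matching the little-endian x86
 * instruction stream; e.g. the immediate bytes 0x34 0x12 yield 0x1234.  With
 * IEM_USE_UNALIGNED_DATA_ACCESS defined the value is instead read with a
 * single, possibly unaligned host load, which only produces the same result
 * on a little-endian host.
 */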
2119
2120
2121/**
2122 * Fetches the next opcode word, returns automatically on failure.
2123 *
2124 * @param a_pu16 Where to return the opcode word.
2125 * @remark Implicitly references pVCpu.
2126 */
2127#ifndef IEM_WITH_SETJMP
2128# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2129 do \
2130 { \
2131 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2132 if (rcStrict2 != VINF_SUCCESS) \
2133 return rcStrict2; \
2134 } while (0)
2135#else
2136# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2137#endif
2138
2139#ifndef IEM_WITH_SETJMP
2140
2141/**
2142 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2143 *
2144 * @returns Strict VBox status code.
2145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2146 * @param pu32 Where to return the opcode double word.
2147 */
2148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2149{
2150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2151 if (rcStrict == VINF_SUCCESS)
2152 {
2153 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2154 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2155 pVCpu->iem.s.offOpcode = offOpcode + 2;
2156 }
2157 else
2158 *pu32 = 0;
2159 return rcStrict;
2160}
2161
2162
2163/**
2164 * Fetches the next opcode word, zero extending it to a double word.
2165 *
2166 * @returns Strict VBox status code.
2167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2168 * @param pu32 Where to return the opcode double word.
2169 */
2170DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2171{
2172 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2173 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2174 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2175
2176 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2177 pVCpu->iem.s.offOpcode = offOpcode + 2;
2178 return VINF_SUCCESS;
2179}
2180
2181#endif /* !IEM_WITH_SETJMP */
2182
2183
2184/**
2185 * Fetches the next opcode word and zero extends it to a double word, returns
2186 * automatically on failure.
2187 *
2188 * @param a_pu32 Where to return the opcode double word.
2189 * @remark Implicitly references pVCpu.
2190 */
2191#ifndef IEM_WITH_SETJMP
2192# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2193 do \
2194 { \
2195 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2196 if (rcStrict2 != VINF_SUCCESS) \
2197 return rcStrict2; \
2198 } while (0)
2199#else
2200# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2201#endif
2202
2203#ifndef IEM_WITH_SETJMP
2204
2205/**
2206 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2207 *
2208 * @returns Strict VBox status code.
2209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2210 * @param pu64 Where to return the opcode quad word.
2211 */
2212DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2213{
2214 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2215 if (rcStrict == VINF_SUCCESS)
2216 {
2217 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2218 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2219 pVCpu->iem.s.offOpcode = offOpcode + 2;
2220 }
2221 else
2222 *pu64 = 0;
2223 return rcStrict;
2224}
2225
2226
2227/**
2228 * Fetches the next opcode word, zero extending it to a quad word.
2229 *
2230 * @returns Strict VBox status code.
2231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2232 * @param pu64 Where to return the opcode quad word.
2233 */
2234DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2235{
2236 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2237 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2238 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2239
2240 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2241 pVCpu->iem.s.offOpcode = offOpcode + 2;
2242 return VINF_SUCCESS;
2243}
2244
2245#endif /* !IEM_WITH_SETJMP */
2246
2247/**
2248 * Fetches the next opcode word and zero extends it to a quad word, returns
2249 * automatically on failure.
2250 *
2251 * @param a_pu64 Where to return the opcode quad word.
2252 * @remark Implicitly references pVCpu.
2253 */
2254#ifndef IEM_WITH_SETJMP
2255# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2256 do \
2257 { \
2258 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2259 if (rcStrict2 != VINF_SUCCESS) \
2260 return rcStrict2; \
2261 } while (0)
2262#else
2263# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2264#endif
2265
2266
2267#ifndef IEM_WITH_SETJMP
2268/**
2269 * Fetches the next signed word from the opcode stream.
2270 *
2271 * @returns Strict VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2273 * @param pi16 Where to return the signed word.
2274 */
2275DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2276{
2277 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2278}
2279#endif /* !IEM_WITH_SETJMP */
2280
2281
2282/**
2283 * Fetches the next signed word from the opcode stream, returning automatically
2284 * on failure.
2285 *
2286 * @param a_pi16 Where to return the signed word.
2287 * @remark Implicitly references pVCpu.
2288 */
2289#ifndef IEM_WITH_SETJMP
2290# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2291 do \
2292 { \
2293 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2294 if (rcStrict2 != VINF_SUCCESS) \
2295 return rcStrict2; \
2296 } while (0)
2297#else
2298# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2299#endif
2300
2301#ifndef IEM_WITH_SETJMP
2302
2303/**
2304 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2305 *
2306 * @returns Strict VBox status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param pu32 Where to return the opcode dword.
2309 */
2310DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2311{
2312 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2313 if (rcStrict == VINF_SUCCESS)
2314 {
2315 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2316# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2317 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2318# else
2319 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2320 pVCpu->iem.s.abOpcode[offOpcode + 1],
2321 pVCpu->iem.s.abOpcode[offOpcode + 2],
2322 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2323# endif
2324 pVCpu->iem.s.offOpcode = offOpcode + 4;
2325 }
2326 else
2327 *pu32 = 0;
2328 return rcStrict;
2329}
2330
2331
2332/**
2333 * Fetches the next opcode dword.
2334 *
2335 * @returns Strict VBox status code.
2336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2337 * @param pu32 Where to return the opcode double word.
2338 */
2339DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2340{
2341 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2342 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2343 {
2344 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2345# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2346 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2347# else
2348 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2349 pVCpu->iem.s.abOpcode[offOpcode + 1],
2350 pVCpu->iem.s.abOpcode[offOpcode + 2],
2351 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2352# endif
2353 return VINF_SUCCESS;
2354 }
2355 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2356}
2357
2358#else /* IEM_WITH_SETJMP */
2359
2360/**
2361 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2362 *
2363 * @returns The opcode dword.
2364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2365 */
2366DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2367{
2368# ifdef IEM_WITH_CODE_TLB
2369 uint32_t u32;
2370 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2371 return u32;
2372# else
2373 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2374 if (rcStrict == VINF_SUCCESS)
2375 {
2376 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2377 pVCpu->iem.s.offOpcode = offOpcode + 4;
2378# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2379 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2380# else
2381 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2382 pVCpu->iem.s.abOpcode[offOpcode + 1],
2383 pVCpu->iem.s.abOpcode[offOpcode + 2],
2384 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2385# endif
2386 }
2387 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2388# endif
2389}
2390
2391
2392/**
2393 * Fetches the next opcode dword, longjmp on error.
2394 *
2395 * @returns The opcode dword.
2396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2397 */
2398DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2399{
2400# ifdef IEM_WITH_CODE_TLB
2401 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2402 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2403 if (RT_LIKELY( pbBuf != NULL
2404 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2405 {
2406 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2407# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2408 return *(uint32_t const *)&pbBuf[offBuf];
2409# else
2410 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2411 pbBuf[offBuf + 1],
2412 pbBuf[offBuf + 2],
2413 pbBuf[offBuf + 3]);
2414# endif
2415 }
2416# else
2417 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2418 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2419 {
2420 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2421# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2422 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2423# else
2424 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2425 pVCpu->iem.s.abOpcode[offOpcode + 1],
2426 pVCpu->iem.s.abOpcode[offOpcode + 2],
2427 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2428# endif
2429 }
2430# endif
2431 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2432}
2433
2434#endif /* IEM_WITH_SETJMP */
2435
2436
2437/**
2438 * Fetches the next opcode dword, returns automatically on failure.
2439 *
2440 * @param a_pu32 Where to return the opcode dword.
2441 * @remark Implicitly references pVCpu.
2442 */
2443#ifndef IEM_WITH_SETJMP
2444# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2445 do \
2446 { \
2447 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2448 if (rcStrict2 != VINF_SUCCESS) \
2449 return rcStrict2; \
2450 } while (0)
2451#else
2452# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2453#endif
2454
2455#ifndef IEM_WITH_SETJMP
2456
2457/**
2458 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2459 *
2460 * @returns Strict VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2462 * @param pu64 Where to return the opcode dword.
2463 * @param pu64 Where to return the opcode quad word.
2464DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2465{
2466 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2467 if (rcStrict == VINF_SUCCESS)
2468 {
2469 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2470 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2471 pVCpu->iem.s.abOpcode[offOpcode + 1],
2472 pVCpu->iem.s.abOpcode[offOpcode + 2],
2473 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2474 pVCpu->iem.s.offOpcode = offOpcode + 4;
2475 }
2476 else
2477 *pu64 = 0;
2478 return rcStrict;
2479}
2480
2481
2482/**
2483 * Fetches the next opcode dword, zero extending it to a quad word.
2484 *
2485 * @returns Strict VBox status code.
2486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2487 * @param pu64 Where to return the opcode quad word.
2488 */
2489DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2490{
2491 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2492 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2493 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2494
2495 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2496 pVCpu->iem.s.abOpcode[offOpcode + 1],
2497 pVCpu->iem.s.abOpcode[offOpcode + 2],
2498 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2499 pVCpu->iem.s.offOpcode = offOpcode + 4;
2500 return VINF_SUCCESS;
2501}
2502
2503#endif /* !IEM_WITH_SETJMP */
2504
2505
2506/**
2507 * Fetches the next opcode dword and zero extends it to a quad word, returns
2508 * automatically on failure.
2509 *
2510 * @param a_pu64 Where to return the opcode quad word.
2511 * @remark Implicitly references pVCpu.
2512 */
2513#ifndef IEM_WITH_SETJMP
2514# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2515 do \
2516 { \
2517 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2518 if (rcStrict2 != VINF_SUCCESS) \
2519 return rcStrict2; \
2520 } while (0)
2521#else
2522# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2523#endif
2524
2525
2526#ifndef IEM_WITH_SETJMP
2527/**
2528 * Fetches the next signed double word from the opcode stream.
2529 *
2530 * @returns Strict VBox status code.
2531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2532 * @param pi32 Where to return the signed double word.
2533 */
2534DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2535{
2536 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2537}
2538#endif
2539
2540/**
2541 * Fetches the next signed double word from the opcode stream, returning
2542 * automatically on failure.
2543 *
2544 * @param a_pi32 Where to return the signed double word.
2545 * @remark Implicitly references pVCpu.
2546 */
2547#ifndef IEM_WITH_SETJMP
2548# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2549 do \
2550 { \
2551 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2552 if (rcStrict2 != VINF_SUCCESS) \
2553 return rcStrict2; \
2554 } while (0)
2555#else
2556# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2557#endif
2558
2559#ifndef IEM_WITH_SETJMP
2560
2561/**
2562 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2563 *
2564 * @returns Strict VBox status code.
2565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2566 * @param pu64 Where to return the opcode qword.
2567 */
2568DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2569{
2570 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2571 if (rcStrict == VINF_SUCCESS)
2572 {
2573 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2574 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2575 pVCpu->iem.s.abOpcode[offOpcode + 1],
2576 pVCpu->iem.s.abOpcode[offOpcode + 2],
2577 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2578 pVCpu->iem.s.offOpcode = offOpcode + 4;
2579 }
2580 else
2581 *pu64 = 0;
2582 return rcStrict;
2583}
2584
2585
2586/**
2587 * Fetches the next opcode dword, sign extending it into a quad word.
2588 *
2589 * @returns Strict VBox status code.
2590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2591 * @param pu64 Where to return the opcode quad word.
2592 */
2593DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2594{
2595 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2596 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2597 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2598
2599 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2600 pVCpu->iem.s.abOpcode[offOpcode + 1],
2601 pVCpu->iem.s.abOpcode[offOpcode + 2],
2602 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2603 *pu64 = i32;
2604 pVCpu->iem.s.offOpcode = offOpcode + 4;
2605 return VINF_SUCCESS;
2606}
2607
2608#endif /* !IEM_WITH_SETJMP */
2609
2610
2611/**
2612 * Fetches the next opcode double word and sign extends it to a quad word,
2613 * returns automatically on failure.
2614 *
2615 * @param a_pu64 Where to return the opcode quad word.
2616 * @remark Implicitly references pVCpu.
2617 */
2618#ifndef IEM_WITH_SETJMP
2619# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2620 do \
2621 { \
2622 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2623 if (rcStrict2 != VINF_SUCCESS) \
2624 return rcStrict2; \
2625 } while (0)
2626#else
2627# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2628#endif
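/*
 * Illustration (not from the original source): this matches the x86-64 rule
 * that 32-bit immediates and displacements are sign extended to 64 bits, so
 * the dword 0xFFFFFFFF is returned as UINT64_C(0xFFFFFFFFFFFFFFFF) rather
 * than being zero extended.
 */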
2629
2630#ifndef IEM_WITH_SETJMP
2631
2632/**
2633 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2634 *
2635 * @returns Strict VBox status code.
2636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2637 * @param pu64 Where to return the opcode qword.
2638 */
2639DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2640{
2641 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2642 if (rcStrict == VINF_SUCCESS)
2643 {
2644 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2645# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2646 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2647# else
2648 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2649 pVCpu->iem.s.abOpcode[offOpcode + 1],
2650 pVCpu->iem.s.abOpcode[offOpcode + 2],
2651 pVCpu->iem.s.abOpcode[offOpcode + 3],
2652 pVCpu->iem.s.abOpcode[offOpcode + 4],
2653 pVCpu->iem.s.abOpcode[offOpcode + 5],
2654 pVCpu->iem.s.abOpcode[offOpcode + 6],
2655 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2656# endif
2657 pVCpu->iem.s.offOpcode = offOpcode + 8;
2658 }
2659 else
2660 *pu64 = 0;
2661 return rcStrict;
2662}
2663
2664
2665/**
2666 * Fetches the next opcode qword.
2667 *
2668 * @returns Strict VBox status code.
2669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2670 * @param pu64 Where to return the opcode qword.
2671 */
2672DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2673{
2674 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2675 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2676 {
2677# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2678 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2679# else
2680 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2681 pVCpu->iem.s.abOpcode[offOpcode + 1],
2682 pVCpu->iem.s.abOpcode[offOpcode + 2],
2683 pVCpu->iem.s.abOpcode[offOpcode + 3],
2684 pVCpu->iem.s.abOpcode[offOpcode + 4],
2685 pVCpu->iem.s.abOpcode[offOpcode + 5],
2686 pVCpu->iem.s.abOpcode[offOpcode + 6],
2687 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2688# endif
2689 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2690 return VINF_SUCCESS;
2691 }
2692 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2693}
2694
2695#else /* IEM_WITH_SETJMP */
2696
2697/**
2698 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2699 *
2700 * @returns The opcode qword.
2701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2702 */
2703DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2704{
2705# ifdef IEM_WITH_CODE_TLB
2706 uint64_t u64;
2707 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2708 return u64;
2709# else
2710 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2711 if (rcStrict == VINF_SUCCESS)
2712 {
2713 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2714 pVCpu->iem.s.offOpcode = offOpcode + 8;
2715# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2716 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2717# else
2718 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2719 pVCpu->iem.s.abOpcode[offOpcode + 1],
2720 pVCpu->iem.s.abOpcode[offOpcode + 2],
2721 pVCpu->iem.s.abOpcode[offOpcode + 3],
2722 pVCpu->iem.s.abOpcode[offOpcode + 4],
2723 pVCpu->iem.s.abOpcode[offOpcode + 5],
2724 pVCpu->iem.s.abOpcode[offOpcode + 6],
2725 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2726# endif
2727 }
2728 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2729# endif
2730}
2731
2732
2733/**
2734 * Fetches the next opcode qword, longjmp on error.
2735 *
2736 * @returns The opcode qword.
2737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2738 */
2739DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2740{
2741# ifdef IEM_WITH_CODE_TLB
2742 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2743 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2744 if (RT_LIKELY( pbBuf != NULL
2745 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2746 {
2747 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2748# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2749 return *(uint64_t const *)&pbBuf[offBuf];
2750# else
2751 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2752 pbBuf[offBuf + 1],
2753 pbBuf[offBuf + 2],
2754 pbBuf[offBuf + 3],
2755 pbBuf[offBuf + 4],
2756 pbBuf[offBuf + 5],
2757 pbBuf[offBuf + 6],
2758 pbBuf[offBuf + 7]);
2759# endif
2760 }
2761# else
2762 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2763 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2764 {
2765 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2766# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2767 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2768# else
2769 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2770 pVCpu->iem.s.abOpcode[offOpcode + 1],
2771 pVCpu->iem.s.abOpcode[offOpcode + 2],
2772 pVCpu->iem.s.abOpcode[offOpcode + 3],
2773 pVCpu->iem.s.abOpcode[offOpcode + 4],
2774 pVCpu->iem.s.abOpcode[offOpcode + 5],
2775 pVCpu->iem.s.abOpcode[offOpcode + 6],
2776 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2777# endif
2778 }
2779# endif
2780 return iemOpcodeGetNextU64SlowJmp(pVCpu);
2781}
2782
2783#endif /* IEM_WITH_SETJMP */
2784
2785/**
2786 * Fetches the next opcode quad word, returns automatically on failure.
2787 *
2788 * @param a_pu64 Where to return the opcode quad word.
2789 * @remark Implicitly references pVCpu.
2790 */
2791#ifndef IEM_WITH_SETJMP
2792# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
2793 do \
2794 { \
2795 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
2796 if (rcStrict2 != VINF_SUCCESS) \
2797 return rcStrict2; \
2798 } while (0)
2799#else
2800# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
2801#endif
2802
2803
2804/** @name Misc Worker Functions.
2805 * @{
2806 */
2807
2808
2809/**
2810 * Validates a new SS segment.
2811 *
2812 * @returns VBox strict status code.
2813 * @param pVCpu The cross context virtual CPU structure of the
2814 * calling thread.
2815 * @param pCtx The CPU context.
2816 * @param NewSS The new SS selector.
2817 * @param uCpl The CPL to load the stack for.
2818 * @param pDesc Where to return the descriptor.
2819 */
2820IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2821{
2822 NOREF(pCtx);
2823
2824 /* Null selectors are not allowed (we're not called for dispatching
2825 interrupts with SS=0 in long mode). */
2826 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2827 {
2828 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2829 return iemRaiseTaskSwitchFault0(pVCpu);
2830 }
2831
2832 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2833 if ((NewSS & X86_SEL_RPL) != uCpl)
2834 {
2835 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2836 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2837 }
2838
2839 /*
2840 * Read the descriptor.
2841 */
2842 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2843 if (rcStrict != VINF_SUCCESS)
2844 return rcStrict;
2845
2846 /*
2847 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2848 */
2849 if (!pDesc->Legacy.Gen.u1DescType)
2850 {
2851 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2852 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2853 }
2854
2855 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2856 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2857 {
2858 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2859 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2860 }
2861 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2862 {
2863 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2864 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2865 }
2866
2867 /* Is it there? */
2868 /** @todo testcase: Is this checked before the canonical / limit check below? */
2869 if (!pDesc->Legacy.Gen.u1Present)
2870 {
2871 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2872 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2873 }
2874
2875 return VINF_SUCCESS;
2876}
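/*
 * Call sketch for iemMiscValidateNewSS (illustrative; the local names are
 * hypothetical):
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * On success DescSS describes a present, writable data segment whose DPL and
 * RPL both equal the requested CPL; actually loading SS and the stack pointer
 * is left to the caller.
 */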
2877
2878
2879/**
2880 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2881 * not.
2882 *
2883 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2884 * @param a_pCtx The CPU context.
2885 */
2886#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2887# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2888 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
2889 ? (a_pCtx)->eflags.u \
2890 : CPUMRawGetEFlags(a_pVCpu) )
2891#else
2892# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2893 ( (a_pCtx)->eflags.u )
2894#endif
2895
2896/**
2897 * Updates the EFLAGS in the correct manner wrt. PATM.
2898 *
2899 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param a_pCtx The CPU context.
2901 * @param a_fEfl The new EFLAGS.
2902 */
2903#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2904# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
2905 do { \
2906 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
2907 (a_pCtx)->eflags.u = (a_fEfl); \
2908 else \
2909 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
2910 } while (0)
2911#else
2912# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
2913 do { \
2914 (a_pCtx)->eflags.u = (a_fEfl); \
2915 } while (0)
2916#endif
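/*
 * Typical read-modify-write pattern for these macros (the same sequence is
 * used by the exception dispatch code further down in this file):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * Going through the macros rather than poking pCtx->eflags directly keeps the
 * raw-mode/PATM configuration working, where parts of EFLAGS are maintained
 * by CPUMRawGetEFlags/CPUMRawSetEFlags.
 */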
2917
2918
2919/** @} */
2920
2921/** @name Raising Exceptions.
2922 *
2923 * @{
2924 */
2925
2926/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2927 * @{ */
2928/** CPU exception. */
2929#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2930/** External interrupt (from PIC, APIC, whatever). */
2931#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2932/** Software interrupt (int or into, not bound).
2933 * Returns to the following instruction */
2934#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2935/** Takes an error code. */
2936#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2937/** Takes a CR2. */
2938#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2939/** Generated by the breakpoint instruction. */
2940#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2941/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2942#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2943/** @} */
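/*
 * Illustrative combinations (an assumption about typical callers, not an
 * exhaustive list): a #PF style caller would be expected to pass
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2 together
 * with uErr and uCr2, while an INT xx instruction would pass
 * IEM_XCPT_FLAGS_T_SOFT_INT so the pushed return address points at the
 * following instruction.
 */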
2944
2945
2946/**
2947 * Loads the specified stack far pointer from the TSS.
2948 *
2949 * @returns VBox strict status code.
2950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2951 * @param pCtx The CPU context.
2952 * @param uCpl The CPL to load the stack for.
2953 * @param pSelSS Where to return the new stack segment.
2954 * @param puEsp Where to return the new stack pointer.
2955 */
2956IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2957 PRTSEL pSelSS, uint32_t *puEsp)
2958{
2959 VBOXSTRICTRC rcStrict;
2960 Assert(uCpl < 4);
2961
2962 switch (pCtx->tr.Attr.n.u4Type)
2963 {
2964 /*
2965 * 16-bit TSS (X86TSS16).
2966 */
2967 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2968 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2969 {
2970 uint32_t off = uCpl * 4 + 2;
2971 if (off + 4 <= pCtx->tr.u32Limit)
2972 {
2973 /** @todo check actual access pattern here. */
2974 uint32_t u32Tmp = 0; /* gcc maybe... */
2975 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2976 if (rcStrict == VINF_SUCCESS)
2977 {
2978 *puEsp = RT_LOWORD(u32Tmp);
2979 *pSelSS = RT_HIWORD(u32Tmp);
2980 return VINF_SUCCESS;
2981 }
2982 }
2983 else
2984 {
2985 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2986 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2987 }
2988 break;
2989 }
2990
2991 /*
2992 * 32-bit TSS (X86TSS32).
2993 */
2994 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2995 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2996 {
2997 uint32_t off = uCpl * 8 + 4;
2998 if (off + 7 <= pCtx->tr.u32Limit)
2999 {
3000/** @todo check actual access pattern here. */
3001 uint64_t u64Tmp;
3002 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3003 if (rcStrict == VINF_SUCCESS)
3004 {
3005 *puEsp = u64Tmp & UINT32_MAX;
3006 *pSelSS = (RTSEL)(u64Tmp >> 32);
3007 return VINF_SUCCESS;
3008 }
3009 }
3010 else
3011 {
3012                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3013 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3014 }
3015 break;
3016 }
3017
3018 default:
3019 AssertFailed();
3020 rcStrict = VERR_IEM_IPE_4;
3021 break;
3022 }
3023
3024 *puEsp = 0; /* make gcc happy */
3025 *pSelSS = 0; /* make gcc happy */
3026 return rcStrict;
3027}
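/*
 * Worked example (derived from the offset arithmetic above): for a 32-bit TSS
 * and uCpl = 1 the qword is read at TSS offset 1 * 8 + 4 = 12, i.e. the
 * {esp1, ss1} pair; for a 16-bit TSS and uCpl = 0 the dword is read at offset
 * 0 * 4 + 2 = 2, i.e. the {sp0, ss0} pair.
 */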
3028
3029
3030/**
3031 * Loads the specified stack pointer from the 64-bit TSS.
3032 *
3033 * @returns VBox strict status code.
3034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3035 * @param pCtx The CPU context.
3036 * @param uCpl The CPL to load the stack for.
3037 * @param uIst The interrupt stack table index, or 0 to use the stack for uCpl.
3038 * @param puRsp Where to return the new stack pointer.
3039 */
3040IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3041{
3042 Assert(uCpl < 4);
3043 Assert(uIst < 8);
3044 *puRsp = 0; /* make gcc happy */
3045
3046 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3047
3048 uint32_t off;
3049 if (uIst)
3050 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3051 else
3052 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3053 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3054 {
3055 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3056 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3057 }
3058
3059 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3060}
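/*
 * Worked example (derived from the offset arithmetic above): with uIst = 0
 * and uCpl = 2 the stack pointer is read from X86TSS64::rsp2 (two qwords past
 * rsp0); with uIst = 3 it is read from X86TSS64::ist3 (two qwords past ist1),
 * regardless of uCpl.
 */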
3061
3062
3063/**
3064 * Adjust the CPU state according to the exception being raised.
3065 *
3066 * @param pCtx The CPU context.
3067 * @param u8Vector The exception that has been raised.
3068 */
3069DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3070{
3071 switch (u8Vector)
3072 {
3073 case X86_XCPT_DB:
3074 pCtx->dr[7] &= ~X86_DR7_GD;
3075 break;
3076 /** @todo Read the AMD and Intel exception reference... */
3077 }
3078}
3079
3080
3081/**
3082 * Implements exceptions and interrupts for real mode.
3083 *
3084 * @returns VBox strict status code.
3085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3086 * @param pCtx The CPU context.
3087 * @param cbInstr The number of bytes to offset rIP by in the return
3088 * address.
3089 * @param u8Vector The interrupt / exception vector number.
3090 * @param fFlags The flags.
3091 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3092 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3093 */
3094IEM_STATIC VBOXSTRICTRC
3095iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3096 PCPUMCTX pCtx,
3097 uint8_t cbInstr,
3098 uint8_t u8Vector,
3099 uint32_t fFlags,
3100 uint16_t uErr,
3101 uint64_t uCr2)
3102{
3103 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3104 NOREF(uErr); NOREF(uCr2);
3105
3106 /*
3107 * Read the IDT entry.
3108 */
3109 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3110 {
3111 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3112 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3113 }
3114 RTFAR16 Idte;
3115 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3116 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3117 return rcStrict;
3118
3119 /*
3120 * Push the stack frame.
3121 */
3122 uint16_t *pu16Frame;
3123 uint64_t uNewRsp;
3124 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3125 if (rcStrict != VINF_SUCCESS)
3126 return rcStrict;
3127
3128 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3129#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3130 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3131 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3132 fEfl |= UINT16_C(0xf000);
3133#endif
3134 pu16Frame[2] = (uint16_t)fEfl;
3135 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3136 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3137 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3138 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3139 return rcStrict;
3140
3141 /*
3142 * Load the vector address into cs:ip and make exception specific state
3143 * adjustments.
3144 */
3145 pCtx->cs.Sel = Idte.sel;
3146 pCtx->cs.ValidSel = Idte.sel;
3147 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3148 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3149 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3150 pCtx->rip = Idte.off;
3151 fEfl &= ~X86_EFL_IF;
3152 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3153
3154 /** @todo do we actually do this in real mode? */
3155 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3156 iemRaiseXcptAdjustState(pCtx, u8Vector);
3157
3158 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3159}
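/*
 * Worked example (illustrative, following the code above): dispatching vector
 * 0x08 in real mode reads the 4-byte IVT entry at idtr.pIdt + 0x20 (provided
 * the IDT limit covers it), pushes FLAGS, CS and the return IP (IP + cbInstr
 * for software interrupts), clears IF, and resumes at Idte.sel:Idte.off with
 * the CS base set to Idte.sel << 4.
 */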
3160
3161
3162/**
3163 * Loads a NULL data selector into when coming from V8086 mode.
3164 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3166 * @param pSReg Pointer to the segment register.
3167 */
3168IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3169{
3170 pSReg->Sel = 0;
3171 pSReg->ValidSel = 0;
3172 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3173 {
3174 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3175 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3176 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3177 }
3178 else
3179 {
3180 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3181 /** @todo check this on AMD-V */
3182 pSReg->u64Base = 0;
3183 pSReg->u32Limit = 0;
3184 }
3185}
3186
3187
3188/**
3189 * Loads a segment selector during a task switch in V8086 mode.
3190 *
3191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3192 * @param pSReg Pointer to the segment register.
3193 * @param uSel The selector value to load.
3194 */
3195IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3196{
3197 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3198 pSReg->Sel = uSel;
3199 pSReg->ValidSel = uSel;
3200 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3201 pSReg->u64Base = uSel << 4;
3202 pSReg->u32Limit = 0xffff;
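/* 0xf3 = present, DPL=3, accessed read/write data segment - the attribute value the
   guest-segment checks referenced above expect for V8086 segments. */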
3203 pSReg->Attr.u = 0xf3;
3204}
3205
3206
3207/**
3208 * Loads a NULL data selector into a selector register, both the hidden and
3209 * visible parts, in protected mode.
3210 *
3211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3212 * @param pSReg Pointer to the segment register.
3213 * @param uRpl The RPL.
3214 */
3215IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3216{
3217 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3218 * data selector in protected mode. */
3219 pSReg->Sel = uRpl;
3220 pSReg->ValidSel = uRpl;
3221 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3222 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3223 {
3224 /* VT-x (Intel 3960x) observed doing something like this. */
3225 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3226 pSReg->u32Limit = UINT32_MAX;
3227 pSReg->u64Base = 0;
3228 }
3229 else
3230 {
3231 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3232 pSReg->u32Limit = 0;
3233 pSReg->u64Base = 0;
3234 }
3235}
3236
3237
3238/**
3239 * Loads a segment selector during a task switch in protected mode.
3240 *
3241 * In this task switch scenario, we would throw \#TS exceptions rather than
3242 * \#GPs.
3243 *
3244 * @returns VBox strict status code.
3245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3246 * @param pSReg Pointer to the segment register.
3247 * @param uSel The new selector value.
3248 *
3249 * @remarks This does _not_ handle CS or SS.
3250 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3251 */
3252IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3253{
3254 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3255
3256 /* Null data selector. */
3257 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3258 {
3259 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3261 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3262 return VINF_SUCCESS;
3263 }
3264
3265 /* Fetch the descriptor. */
3266 IEMSELDESC Desc;
3267 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3268 if (rcStrict != VINF_SUCCESS)
3269 {
3270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3271 VBOXSTRICTRC_VAL(rcStrict)));
3272 return rcStrict;
3273 }
3274
3275 /* Must be a data segment or readable code segment. */
3276 if ( !Desc.Legacy.Gen.u1DescType
3277 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3278 {
3279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3280 Desc.Legacy.Gen.u4Type));
3281 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3282 }
3283
3284 /* Check privileges for data segments and non-conforming code segments. */
3285 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3286 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3287 {
3288 /* The RPL and the new CPL must be less than or equal to the DPL. */
3289 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3290 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3291 {
3292 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3293 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3294 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3295 }
3296 }
3297
3298 /* Is it there? */
3299 if (!Desc.Legacy.Gen.u1Present)
3300 {
3301 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3302 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3303 }
3304
3305 /* The base and limit. */
3306 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3307 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3308
3309 /*
3310 * Ok, everything checked out fine. Now set the accessed bit before
3311 * committing the result into the registers.
3312 */
3313 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3314 {
3315 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3316 if (rcStrict != VINF_SUCCESS)
3317 return rcStrict;
3318 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3319 }
3320
3321 /* Commit */
3322 pSReg->Sel = uSel;
3323 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3324 pSReg->u32Limit = cbLimit;
3325 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3326 pSReg->ValidSel = uSel;
3327 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3328 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3329 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3330
3331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3332 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3333 return VINF_SUCCESS;
3334}
3335
3336
3337/**
3338 * Performs a task switch.
3339 *
3340 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3341 * caller is responsible for performing the necessary checks (like DPL, TSS
3342 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3343 * reference for JMP, CALL, IRET.
3344 *
3345 * If the task switch is due to a software interrupt or hardware exception,
3346 * the caller is responsible for validating the TSS selector and descriptor. See
3347 * Intel Instruction reference for INT n.
3348 *
3349 * @returns VBox strict status code.
3350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3351 * @param pCtx The CPU context.
3352 * @param enmTaskSwitch What caused this task switch.
3353 * @param uNextEip The EIP effective after the task switch.
3354 * @param fFlags The flags.
3355 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3356 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3357 * @param SelTSS The TSS selector of the new task.
3358 * @param pNewDescTSS Pointer to the new TSS descriptor.
3359 */
3360IEM_STATIC VBOXSTRICTRC
3361iemTaskSwitch(PVMCPU pVCpu,
3362 PCPUMCTX pCtx,
3363 IEMTASKSWITCH enmTaskSwitch,
3364 uint32_t uNextEip,
3365 uint32_t fFlags,
3366 uint16_t uErr,
3367 uint64_t uCr2,
3368 RTSEL SelTSS,
3369 PIEMSELDESC pNewDescTSS)
3370{
3371 Assert(!IEM_IS_REAL_MODE(pVCpu));
3372 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3373
3374 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3375 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3376 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3377 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3378 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3379
3380 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3381 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3382
3383 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3384 fIsNewTSS386, pCtx->eip, uNextEip));
3385
3386 /* Update CR2 in case it's a page-fault. */
3387 /** @todo This should probably be done much earlier in IEM/PGM. See
3388 * @bugref{5653#c49}. */
3389 if (fFlags & IEM_XCPT_FLAGS_CR2)
3390 pCtx->cr2 = uCr2;
3391
3392 /*
3393 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3394 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3395 */
3396 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3397 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3398 if (uNewTSSLimit < uNewTSSLimitMin)
3399 {
3400 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3401 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3402 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3403 }
3404
3405 /*
3406 * Check the current TSS limit. The last data written to the current TSS during the
3407 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3408 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3409 *
3410 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3411 * end up with smaller than "legal" TSS limits.
3412 */
3413 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3414 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3415 if (uCurTSSLimit < uCurTSSLimitMin)
3416 {
3417 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3418 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3419 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3420 }
3421
3422 /*
3423 * Verify that the new TSS can be accessed and map it. Map only the required contents
3424 * and not the entire TSS.
3425 */
3426 void *pvNewTSS;
3427 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3428 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3429 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
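/* Thus cbNewTSS covers only the architecturally required part of the TSS: 0x68 bytes for a
   386 TSS and 0x2c bytes for a 286 TSS. */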
3430 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3431 * not perform correct translation if this happens. See Intel spec. 7.2.1
3432 * "Task-State Segment" */
3433 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3434 if (rcStrict != VINF_SUCCESS)
3435 {
3436 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3437 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3438 return rcStrict;
3439 }
3440
3441 /*
3442 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3443 */
3444 uint32_t u32EFlags = pCtx->eflags.u32;
3445 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3446 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3447 {
3448 PX86DESC pDescCurTSS;
3449 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3450 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3451 if (rcStrict != VINF_SUCCESS)
3452 {
3453 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3454 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3455 return rcStrict;
3456 }
3457
3458 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3459 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3460 if (rcStrict != VINF_SUCCESS)
3461 {
3462 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3463 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3464 return rcStrict;
3465 }
3466
3467 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3468 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3469 {
3470 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3471 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3472 u32EFlags &= ~X86_EFL_NT;
3473 }
3474 }
3475
3476 /*
3477 * Save the CPU state into the current TSS.
3478 */
3479 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3480 if (GCPtrNewTSS == GCPtrCurTSS)
3481 {
3482 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3483 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3484 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3485 }
3486 if (fIsNewTSS386)
3487 {
3488 /*
3489 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3490 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3491 */
3492 void *pvCurTSS32;
3493 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3494 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3495 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3496 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3497 if (rcStrict != VINF_SUCCESS)
3498 {
3499 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3500 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3501 return rcStrict;
3502 }
3503
3504 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3505 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3506 pCurTSS32->eip = uNextEip;
3507 pCurTSS32->eflags = u32EFlags;
3508 pCurTSS32->eax = pCtx->eax;
3509 pCurTSS32->ecx = pCtx->ecx;
3510 pCurTSS32->edx = pCtx->edx;
3511 pCurTSS32->ebx = pCtx->ebx;
3512 pCurTSS32->esp = pCtx->esp;
3513 pCurTSS32->ebp = pCtx->ebp;
3514 pCurTSS32->esi = pCtx->esi;
3515 pCurTSS32->edi = pCtx->edi;
3516 pCurTSS32->es = pCtx->es.Sel;
3517 pCurTSS32->cs = pCtx->cs.Sel;
3518 pCurTSS32->ss = pCtx->ss.Sel;
3519 pCurTSS32->ds = pCtx->ds.Sel;
3520 pCurTSS32->fs = pCtx->fs.Sel;
3521 pCurTSS32->gs = pCtx->gs.Sel;
3522
3523 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3524 if (rcStrict != VINF_SUCCESS)
3525 {
3526 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3527 VBOXSTRICTRC_VAL(rcStrict)));
3528 return rcStrict;
3529 }
3530 }
3531 else
3532 {
3533 /*
3534 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3535 */
3536 void *pvCurTSS16;
3537 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3538 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3539 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3540 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3541 if (rcStrict != VINF_SUCCESS)
3542 {
3543 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3544 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3545 return rcStrict;
3546 }
3547
3548 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3549 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3550 pCurTSS16->ip = uNextEip;
3551 pCurTSS16->flags = u32EFlags;
3552 pCurTSS16->ax = pCtx->ax;
3553 pCurTSS16->cx = pCtx->cx;
3554 pCurTSS16->dx = pCtx->dx;
3555 pCurTSS16->bx = pCtx->bx;
3556 pCurTSS16->sp = pCtx->sp;
3557 pCurTSS16->bp = pCtx->bp;
3558 pCurTSS16->si = pCtx->si;
3559 pCurTSS16->di = pCtx->di;
3560 pCurTSS16->es = pCtx->es.Sel;
3561 pCurTSS16->cs = pCtx->cs.Sel;
3562 pCurTSS16->ss = pCtx->ss.Sel;
3563 pCurTSS16->ds = pCtx->ds.Sel;
3564
3565 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3566 if (rcStrict != VINF_SUCCESS)
3567 {
3568 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3569 VBOXSTRICTRC_VAL(rcStrict)));
3570 return rcStrict;
3571 }
3572 }
3573
3574 /*
3575 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3576 */
3577 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3578 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3579 {
3580 /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
3581 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3582 pNewTSS->selPrev = pCtx->tr.Sel;
3583 }
3584
3585 /*
3586 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
3587 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3588 */
3589 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3590 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3591 bool fNewDebugTrap;
3592 if (fIsNewTSS386)
3593 {
3594 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3595 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3596 uNewEip = pNewTSS32->eip;
3597 uNewEflags = pNewTSS32->eflags;
3598 uNewEax = pNewTSS32->eax;
3599 uNewEcx = pNewTSS32->ecx;
3600 uNewEdx = pNewTSS32->edx;
3601 uNewEbx = pNewTSS32->ebx;
3602 uNewEsp = pNewTSS32->esp;
3603 uNewEbp = pNewTSS32->ebp;
3604 uNewEsi = pNewTSS32->esi;
3605 uNewEdi = pNewTSS32->edi;
3606 uNewES = pNewTSS32->es;
3607 uNewCS = pNewTSS32->cs;
3608 uNewSS = pNewTSS32->ss;
3609 uNewDS = pNewTSS32->ds;
3610 uNewFS = pNewTSS32->fs;
3611 uNewGS = pNewTSS32->gs;
3612 uNewLdt = pNewTSS32->selLdt;
3613 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3614 }
3615 else
3616 {
3617 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3618 uNewCr3 = 0;
3619 uNewEip = pNewTSS16->ip;
3620 uNewEflags = pNewTSS16->flags;
3621 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3622 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3623 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3624 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3625 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3626 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3627 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3628 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3629 uNewES = pNewTSS16->es;
3630 uNewCS = pNewTSS16->cs;
3631 uNewSS = pNewTSS16->ss;
3632 uNewDS = pNewTSS16->ds;
3633 uNewFS = 0;
3634 uNewGS = 0;
3635 uNewLdt = pNewTSS16->selLdt;
3636 fNewDebugTrap = false;
3637 }
3638
3639 if (GCPtrNewTSS == GCPtrCurTSS)
3640 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3641 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3642
3643 /*
3644 * We're done accessing the new TSS.
3645 */
3646 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3647 if (rcStrict != VINF_SUCCESS)
3648 {
3649 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3650 return rcStrict;
3651 }
3652
3653 /*
3654 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3655 */
3656 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3657 {
3658 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3659 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3660 if (rcStrict != VINF_SUCCESS)
3661 {
3662 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3663 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3664 return rcStrict;
3665 }
3666
3667 /* Check that the descriptor indicates the new TSS is available (not busy). */
3668 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3669 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3670 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3671
3672 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3673 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3674 if (rcStrict != VINF_SUCCESS)
3675 {
3676 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3677 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3678 return rcStrict;
3679 }
3680 }
3681
3682 /*
3683 * From this point on, we're technically in the new task. Exceptions raised from here on are
3684 * deferred and delivered after the task switch completes, but before the first instruction of the new task executes.
3685 */
3686 pCtx->tr.Sel = SelTSS;
3687 pCtx->tr.ValidSel = SelTSS;
3688 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3689 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3690 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3691 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3692 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3693
3694 /* Set the busy bit in TR. */
3695 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3696 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3697 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3698 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3699 {
3700 uNewEflags |= X86_EFL_NT;
3701 }
3702
3703 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3704 pCtx->cr0 |= X86_CR0_TS;
3705 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3706
3707 pCtx->eip = uNewEip;
3708 pCtx->eax = uNewEax;
3709 pCtx->ecx = uNewEcx;
3710 pCtx->edx = uNewEdx;
3711 pCtx->ebx = uNewEbx;
3712 pCtx->esp = uNewEsp;
3713 pCtx->ebp = uNewEbp;
3714 pCtx->esi = uNewEsi;
3715 pCtx->edi = uNewEdi;
3716
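/* Only the architecturally defined EFLAGS bits are taken from the new TSS; the reserved
   always-one bit (bit 1) is forced set. */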
3717 uNewEflags &= X86_EFL_LIVE_MASK;
3718 uNewEflags |= X86_EFL_RA1_MASK;
3719 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3720
3721 /*
3722 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3723 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3724 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3725 */
3726 pCtx->es.Sel = uNewES;
3727 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3728
3729 pCtx->cs.Sel = uNewCS;
3730 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3731
3732 pCtx->ss.Sel = uNewSS;
3733 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3734
3735 pCtx->ds.Sel = uNewDS;
3736 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3737
3738 pCtx->fs.Sel = uNewFS;
3739 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3740
3741 pCtx->gs.Sel = uNewGS;
3742 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3743 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3744
3745 pCtx->ldtr.Sel = uNewLdt;
3746 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3747 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3748 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3749
3750 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3751 {
3752 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3753 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3754 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3755 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3756 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3757 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3758 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3759 }
3760
3761 /*
3762 * Switch CR3 for the new task.
3763 */
3764 if ( fIsNewTSS386
3765 && (pCtx->cr0 & X86_CR0_PG))
3766 {
3767 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3768 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3769 {
3770 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3771 AssertRCSuccessReturn(rc, rc);
3772 }
3773 else
3774 pCtx->cr3 = uNewCr3;
3775
3776 /* Inform PGM. */
3777 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3778 {
3779 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3780 AssertRCReturn(rc, rc);
3781 /* ignore informational status codes */
3782 }
3783 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3784 }
3785
3786 /*
3787 * Switch LDTR for the new task.
3788 */
3789 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3790 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
3791 else
3792 {
3793 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3794
3795 IEMSELDESC DescNewLdt;
3796 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3797 if (rcStrict != VINF_SUCCESS)
3798 {
3799 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3800 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3801 return rcStrict;
3802 }
3803 if ( !DescNewLdt.Legacy.Gen.u1Present
3804 || DescNewLdt.Legacy.Gen.u1DescType
3805 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3806 {
3807 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3808 uNewLdt, DescNewLdt.Legacy.u));
3809 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3810 }
3811
3812 pCtx->ldtr.ValidSel = uNewLdt;
3813 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3814 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3815 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3816 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3817 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3818 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3819 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
3820 }
3821
3822 IEMSELDESC DescSS;
3823 if (IEM_IS_V86_MODE(pVCpu))
3824 {
3825 pVCpu->iem.s.uCpl = 3;
3826 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES);
3827 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS);
3828 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS);
3829 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS);
3830 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS);
3831 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS);
3832 }
3833 else
3834 {
3835 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3836
3837 /*
3838 * Load the stack segment for the new task.
3839 */
3840 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3841 {
3842 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3843 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3844 }
3845
3846 /* Fetch the descriptor. */
3847 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3848 if (rcStrict != VINF_SUCCESS)
3849 {
3850 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3851 VBOXSTRICTRC_VAL(rcStrict)));
3852 return rcStrict;
3853 }
3854
3855 /* SS must be a data segment and writable. */
3856 if ( !DescSS.Legacy.Gen.u1DescType
3857 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3858 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3859 {
3860 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3861 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3862 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3863 }
3864
3865 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3866 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3867 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3868 {
3869 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3870 uNewCpl));
3871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3872 }
3873
3874 /* Is it there? */
3875 if (!DescSS.Legacy.Gen.u1Present)
3876 {
3877 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3878 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3879 }
3880
3881 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3882 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3883
3884 /* Set the accessed bit before committing the result into SS. */
3885 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3886 {
3887 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3888 if (rcStrict != VINF_SUCCESS)
3889 return rcStrict;
3890 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3891 }
3892
3893 /* Commit SS. */
3894 pCtx->ss.Sel = uNewSS;
3895 pCtx->ss.ValidSel = uNewSS;
3896 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3897 pCtx->ss.u32Limit = cbLimit;
3898 pCtx->ss.u64Base = u64Base;
3899 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3900 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
3901
3902 /* CPL has changed, update IEM before loading rest of segments. */
3903 pVCpu->iem.s.uCpl = uNewCpl;
3904
3905 /*
3906 * Load the data segments for the new task.
3907 */
3908 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
3909 if (rcStrict != VINF_SUCCESS)
3910 return rcStrict;
3911 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
3912 if (rcStrict != VINF_SUCCESS)
3913 return rcStrict;
3914 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
3915 if (rcStrict != VINF_SUCCESS)
3916 return rcStrict;
3917 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
3918 if (rcStrict != VINF_SUCCESS)
3919 return rcStrict;
3920
3921 /*
3922 * Load the code segment for the new task.
3923 */
3924 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3925 {
3926 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3927 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3928 }
3929
3930 /* Fetch the descriptor. */
3931 IEMSELDESC DescCS;
3932 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3933 if (rcStrict != VINF_SUCCESS)
3934 {
3935 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3936 return rcStrict;
3937 }
3938
3939 /* CS must be a code segment. */
3940 if ( !DescCS.Legacy.Gen.u1DescType
3941 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3942 {
3943 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3944 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3945 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3946 }
3947
3948 /* For conforming CS, DPL must be less than or equal to the RPL. */
3949 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3950 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3951 {
3952 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3953 DescCS.Legacy.Gen.u2Dpl));
3954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3955 }
3956
3957 /* For non-conforming CS, DPL must match RPL. */
3958 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3959 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3960 {
3961 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3962 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3964 }
3965
3966 /* Is it there? */
3967 if (!DescCS.Legacy.Gen.u1Present)
3968 {
3969 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3970 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3971 }
3972
3973 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3974 u64Base = X86DESC_BASE(&DescCS.Legacy);
3975
3976 /* Set the accessed bit before committing the result into CS. */
3977 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3978 {
3979 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3980 if (rcStrict != VINF_SUCCESS)
3981 return rcStrict;
3982 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3983 }
3984
3985 /* Commit CS. */
3986 pCtx->cs.Sel = uNewCS;
3987 pCtx->cs.ValidSel = uNewCS;
3988 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3989 pCtx->cs.u32Limit = cbLimit;
3990 pCtx->cs.u64Base = u64Base;
3991 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
3993 }
3994
3995 /** @todo Debug trap. */
3996 if (fIsNewTSS386 && fNewDebugTrap)
3997 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3998
3999 /*
4000 * Construct the error code masks based on what caused this task switch.
4001 * See Intel Instruction reference for INT.
4002 */
4003 uint16_t uExt;
4004 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4005 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4006 {
4007 uExt = 1;
4008 }
4009 else
4010 uExt = 0;
4011
4012 /*
4013 * Push any error code on to the new stack.
4014 */
4015 if (fFlags & IEM_XCPT_FLAGS_ERR)
4016 {
4017 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4018 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4019 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4020
4021 /* Check that there is sufficient space on the stack. */
4022 /** @todo Factor out segment limit checking for normal/expand down segments
4023 * into a separate function. */
4024 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4025 {
4026 if ( pCtx->esp - 1 > cbLimitSS
4027 || pCtx->esp < cbStackFrame)
4028 {
4029 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4030 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4031 cbStackFrame));
4032 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4033 }
4034 }
4035 else
4036 {
4037 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4038 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4039 {
4040 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4041 cbStackFrame));
4042 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4043 }
4044 }
4045
4046
4047 if (fIsNewTSS386)
4048 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4049 else
4050 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4051 if (rcStrict != VINF_SUCCESS)
4052 {
4053 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
4054 VBOXSTRICTRC_VAL(rcStrict)));
4055 return rcStrict;
4056 }
4057 }
4058
4059 /* Check the new EIP against the new CS limit. */
4060 if (pCtx->eip > pCtx->cs.u32Limit)
4061 {
4062 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4063 pCtx->eip, pCtx->cs.u32Limit));
4064 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4065 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4066 }
4067
4068 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4069 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4070}
4071
4072
4073/**
4074 * Implements exceptions and interrupts for protected mode.
4075 *
4076 * @returns VBox strict status code.
4077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4078 * @param pCtx The CPU context.
4079 * @param cbInstr The number of bytes to offset rIP by in the return
4080 * address.
4081 * @param u8Vector The interrupt / exception vector number.
4082 * @param fFlags The flags.
4083 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4084 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4085 */
4086IEM_STATIC VBOXSTRICTRC
4087iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4088 PCPUMCTX pCtx,
4089 uint8_t cbInstr,
4090 uint8_t u8Vector,
4091 uint32_t fFlags,
4092 uint16_t uErr,
4093 uint64_t uCr2)
4094{
4095 /*
4096 * Read the IDT entry.
4097 */
4098 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4099 {
4100 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4101 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4102 }
4103 X86DESC Idte;
4104 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4105 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4106 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4107 return rcStrict;
4108 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4109 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4110 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4111
4112 /*
4113 * Check the descriptor type, DPL and such.
4114 * ASSUMES this is done in the same order as described for call-gate calls.
4115 */
4116 if (Idte.Gate.u1DescType)
4117 {
4118 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4119 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4120 }
4121 bool fTaskGate = false;
4122 uint8_t f32BitGate = true;
4123 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
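/* fEflToClear collects the flags cleared in the handler context: TF, NT, RF and VM always;
   IF is added below for interrupt gates only, which is what distinguishes them from trap
   gates. */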
4124 switch (Idte.Gate.u4Type)
4125 {
4126 case X86_SEL_TYPE_SYS_UNDEFINED:
4127 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4128 case X86_SEL_TYPE_SYS_LDT:
4129 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4130 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4131 case X86_SEL_TYPE_SYS_UNDEFINED2:
4132 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4133 case X86_SEL_TYPE_SYS_UNDEFINED3:
4134 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4135 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4136 case X86_SEL_TYPE_SYS_UNDEFINED4:
4137 {
4138 /** @todo check what actually happens when the type is wrong...
4139 * esp. call gates. */
4140 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4141 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4142 }
4143
4144 case X86_SEL_TYPE_SYS_286_INT_GATE:
4145 f32BitGate = false;
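/* fall thru */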
4146 case X86_SEL_TYPE_SYS_386_INT_GATE:
4147 fEflToClear |= X86_EFL_IF;
4148 break;
4149
4150 case X86_SEL_TYPE_SYS_TASK_GATE:
4151 fTaskGate = true;
4152#ifndef IEM_IMPLEMENTS_TASKSWITCH
4153 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4154#endif
4155 break;
4156
4157 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4158 f32BitGate = false;
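/* fall thru */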
4159 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4160 break;
4161
4162 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4163 }
4164
4165 /* Check DPL against CPL if applicable. */
4166 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4167 {
4168 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4169 {
4170 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4171 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4172 }
4173 }
4174
4175 /* Is it there? */
4176 if (!Idte.Gate.u1Present)
4177 {
4178 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4179 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4180 }
4181
4182 /* Is it a task-gate? */
4183 if (fTaskGate)
4184 {
4185 /*
4186 * Construct the error code masks based on what caused this task switch.
4187 * See Intel Instruction reference for INT.
4188 */
4189 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4190 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4191 RTSEL SelTSS = Idte.Gate.u16Sel;
4192
4193 /*
4194 * Fetch the TSS descriptor in the GDT.
4195 */
4196 IEMSELDESC DescTSS;
4197 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4198 if (rcStrict != VINF_SUCCESS)
4199 {
4200 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4201 VBOXSTRICTRC_VAL(rcStrict)));
4202 return rcStrict;
4203 }
4204
4205 /* The TSS descriptor must be a system segment and be available (not busy). */
4206 if ( DescTSS.Legacy.Gen.u1DescType
4207 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4208 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4209 {
4210 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4211 u8Vector, SelTSS, DescTSS.Legacy.au64));
4212 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4213 }
4214
4215 /* The TSS must be present. */
4216 if (!DescTSS.Legacy.Gen.u1Present)
4217 {
4218 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4219 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4220 }
4221
4222 /* Do the actual task switch. */
4223 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4224 }
4225
4226 /* A null CS is bad. */
4227 RTSEL NewCS = Idte.Gate.u16Sel;
4228 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4229 {
4230 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4231 return iemRaiseGeneralProtectionFault0(pVCpu);
4232 }
4233
4234 /* Fetch the descriptor for the new CS. */
4235 IEMSELDESC DescCS;
4236 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4240 return rcStrict;
4241 }
4242
4243 /* Must be a code segment. */
4244 if (!DescCS.Legacy.Gen.u1DescType)
4245 {
4246 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4247 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4248 }
4249 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4250 {
4251 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4252 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4253 }
4254
4255 /* Don't allow lowering the privilege level. */
4256 /** @todo Does the lowering of privileges apply to software interrupts
4257 * only? This has bearings on the more-privileged or
4258 * same-privilege stack behavior further down. A testcase would
4259 * be nice. */
4260 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4261 {
4262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4263 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4264 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4265 }
4266
4267 /* Make sure the selector is present. */
4268 if (!DescCS.Legacy.Gen.u1Present)
4269 {
4270 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4271 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4272 }
4273
4274 /* Check the new EIP against the new CS limit. */
4275 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4276 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4277 ? Idte.Gate.u16OffsetLow
4278 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4279 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4280 if (uNewEip > cbLimitCS)
4281 {
4282 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4283 u8Vector, uNewEip, cbLimitCS, NewCS));
4284 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4285 }
4286
4287 /* Calc the flag image to push. */
4288 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4289 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4290 fEfl &= ~X86_EFL_RF;
4291 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4292 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4293
4294 /* From V8086 mode only go to CPL 0. */
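/* For a conforming CS the CPL is retained; otherwise the handler runs at the CS descriptor's DPL. */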
4295 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4296 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4297 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4298 {
4299 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4300 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4301 }
4302
4303 /*
4304 * If the privilege level changes, we need to get a new stack from the TSS.
4305 * This in turns means validating the new SS and ESP...
4306 */
4307 if (uNewCpl != pVCpu->iem.s.uCpl)
4308 {
4309 RTSEL NewSS;
4310 uint32_t uNewEsp;
4311 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4312 if (rcStrict != VINF_SUCCESS)
4313 return rcStrict;
4314
4315 IEMSELDESC DescSS;
4316 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4317 if (rcStrict != VINF_SUCCESS)
4318 return rcStrict;
4319
4320 /* Check that there is sufficient space for the stack frame. */
4321 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
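/* Frame contents: IP/EIP, CS, FLAGS, SP/ESP and SS (plus the error code when present);
   V8086 entries additionally save ES, DS, FS and GS. The shift by f32BitGate doubles the
   16-bit sizes for 32-bit gates. */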
4322 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4323 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4324 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4325
4326 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4327 {
4328 if ( uNewEsp - 1 > cbLimitSS
4329 || uNewEsp < cbStackFrame)
4330 {
4331 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4332 u8Vector, NewSS, uNewEsp, cbStackFrame));
4333 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4334 }
4335 }
4336 else
4337 {
4338 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4339 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4340 {
4341 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4342 u8Vector, NewSS, uNewEsp, cbStackFrame));
4343 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4344 }
4345 }
4346
4347 /*
4348 * Start making changes.
4349 */
4350
4351 /* Set the new CPL so that stack accesses use it. */
4352 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4353 pVCpu->iem.s.uCpl = uNewCpl;
4354
4355 /* Create the stack frame. */
4356 RTPTRUNION uStackFrame;
4357 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4358 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4359 if (rcStrict != VINF_SUCCESS)
4360 return rcStrict;
4361 void * const pvStackFrame = uStackFrame.pv;
4362 if (f32BitGate)
4363 {
4364 if (fFlags & IEM_XCPT_FLAGS_ERR)
4365 *uStackFrame.pu32++ = uErr;
4366 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4367 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4368 uStackFrame.pu32[2] = fEfl;
4369 uStackFrame.pu32[3] = pCtx->esp;
4370 uStackFrame.pu32[4] = pCtx->ss.Sel;
4371 if (fEfl & X86_EFL_VM)
4372 {
4373 uStackFrame.pu32[1] = pCtx->cs.Sel;
4374 uStackFrame.pu32[5] = pCtx->es.Sel;
4375 uStackFrame.pu32[6] = pCtx->ds.Sel;
4376 uStackFrame.pu32[7] = pCtx->fs.Sel;
4377 uStackFrame.pu32[8] = pCtx->gs.Sel;
4378 }
4379 }
4380 else
4381 {
4382 if (fFlags & IEM_XCPT_FLAGS_ERR)
4383 *uStackFrame.pu16++ = uErr;
4384 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4385 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4386 uStackFrame.pu16[2] = fEfl;
4387 uStackFrame.pu16[3] = pCtx->sp;
4388 uStackFrame.pu16[4] = pCtx->ss.Sel;
4389 if (fEfl & X86_EFL_VM)
4390 {
4391 uStackFrame.pu16[1] = pCtx->cs.Sel;
4392 uStackFrame.pu16[5] = pCtx->es.Sel;
4393 uStackFrame.pu16[6] = pCtx->ds.Sel;
4394 uStackFrame.pu16[7] = pCtx->fs.Sel;
4395 uStackFrame.pu16[8] = pCtx->gs.Sel;
4396 }
4397 }
4398 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4399 if (rcStrict != VINF_SUCCESS)
4400 return rcStrict;
4401
4402 /* Mark the selectors 'accessed' (hope this is the correct time). */
4403 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4404 * after pushing the stack frame? (Write protect the gdt + stack to
4405 * find out.) */
4406 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4407 {
4408 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4409 if (rcStrict != VINF_SUCCESS)
4410 return rcStrict;
4411 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4412 }
4413
4414 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4415 {
4416 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4417 if (rcStrict != VINF_SUCCESS)
4418 return rcStrict;
4419 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4420 }
4421
4422 /*
4423 * Start committing the register changes (joins with the DPL=CPL branch).
4424 */
4425 pCtx->ss.Sel = NewSS;
4426 pCtx->ss.ValidSel = NewSS;
4427 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4428 pCtx->ss.u32Limit = cbLimitSS;
4429 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4430 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4431 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4432 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4433 * SP is loaded).
4434 * Need to check the other combinations too:
4435 * - 16-bit TSS, 32-bit handler
4436 * - 32-bit TSS, 16-bit handler */
4437 if (!pCtx->ss.Attr.n.u1DefBig)
4438 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4439 else
4440 pCtx->rsp = uNewEsp - cbStackFrame;
4441
4442 if (fEfl & X86_EFL_VM)
4443 {
4444 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4445 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4446 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4447 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4448 }
4449 }
4450 /*
4451 * Same privilege, no stack change and smaller stack frame.
4452 */
4453 else
4454 {
4455 uint64_t uNewRsp;
4456 RTPTRUNION uStackFrame;
4457 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4458 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4459 if (rcStrict != VINF_SUCCESS)
4460 return rcStrict;
4461 void * const pvStackFrame = uStackFrame.pv;
4462
4463 if (f32BitGate)
4464 {
4465 if (fFlags & IEM_XCPT_FLAGS_ERR)
4466 *uStackFrame.pu32++ = uErr;
4467 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4468 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4469 uStackFrame.pu32[2] = fEfl;
4470 }
4471 else
4472 {
4473 if (fFlags & IEM_XCPT_FLAGS_ERR)
4474 *uStackFrame.pu16++ = uErr;
4475 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4476 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4477 uStackFrame.pu16[2] = fEfl;
4478 }
4479 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4480 if (rcStrict != VINF_SUCCESS)
4481 return rcStrict;
4482
4483 /* Mark the CS selector as 'accessed'. */
4484 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4485 {
4486 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4487 if (rcStrict != VINF_SUCCESS)
4488 return rcStrict;
4489 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4490 }
4491
4492 /*
4493 * Start committing the register changes (joins with the other branch).
4494 */
4495 pCtx->rsp = uNewRsp;
4496 }
4497
4498 /* ... register committing continues. */
4499 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4500 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4501 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4502 pCtx->cs.u32Limit = cbLimitCS;
4503 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4504 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4505
4506 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4507 fEfl &= ~fEflToClear;
4508 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4509
4510 if (fFlags & IEM_XCPT_FLAGS_CR2)
4511 pCtx->cr2 = uCr2;
4512
4513 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4514 iemRaiseXcptAdjustState(pCtx, u8Vector);
4515
4516 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4517}
4518
4519
4520/**
4521 * Implements exceptions and interrupts for long mode.
4522 *
4523 * @returns VBox strict status code.
4524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4525 * @param pCtx The CPU context.
4526 * @param cbInstr The number of bytes to offset rIP by in the return
4527 * address.
4528 * @param u8Vector The interrupt / exception vector number.
4529 * @param fFlags The flags.
4530 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4531 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4532 */
4533IEM_STATIC VBOXSTRICTRC
4534iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4535 PCPUMCTX pCtx,
4536 uint8_t cbInstr,
4537 uint8_t u8Vector,
4538 uint32_t fFlags,
4539 uint16_t uErr,
4540 uint64_t uCr2)
4541{
4542 /*
4543 * Read the IDT entry.
4544 */
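/* Long mode IDT entries are 16 bytes each, hence the shift by 4 and the two 8-byte system
   fetches below. */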
4545 uint16_t offIdt = (uint16_t)u8Vector << 4;
4546 if (pCtx->idtr.cbIdt < offIdt + 7)
4547 {
4548 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4549 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4550 }
4551 X86DESC64 Idte;
4552 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4554 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4555 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4556 return rcStrict;
4557 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4558 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4559 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4560
4561 /*
4562 * Check the descriptor type, DPL and such.
4563 * ASSUMES this is done in the same order as described for call-gate calls.
4564 */
4565 if (Idte.Gate.u1DescType)
4566 {
4567 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4568 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4569 }
4570 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4571 switch (Idte.Gate.u4Type)
4572 {
4573 case AMD64_SEL_TYPE_SYS_INT_GATE:
4574 fEflToClear |= X86_EFL_IF;
4575 break;
4576 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4577 break;
4578
4579 default:
4580 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4581 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4582 }
4583
4584 /* Check DPL against CPL if applicable. */
4585 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4586 {
4587 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4588 {
4589 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4590 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4591 }
4592 }
4593
4594 /* Is it there? */
4595 if (!Idte.Gate.u1Present)
4596 {
4597 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4598 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4599 }
4600
4601 /* A null CS is bad. */
4602 RTSEL NewCS = Idte.Gate.u16Sel;
4603 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4604 {
4605 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4606 return iemRaiseGeneralProtectionFault0(pVCpu);
4607 }
4608
4609 /* Fetch the descriptor for the new CS. */
4610 IEMSELDESC DescCS;
4611 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4612 if (rcStrict != VINF_SUCCESS)
4613 {
4614 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4615 return rcStrict;
4616 }
4617
4618 /* Must be a 64-bit code segment. */
4619 if (!DescCS.Long.Gen.u1DescType)
4620 {
4621 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4622 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4623 }
4624 if ( !DescCS.Long.Gen.u1Long
4625 || DescCS.Long.Gen.u1DefBig
4626 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4627 {
4628 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4629 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4630 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4631 }
4632
4633 /* Don't allow lowering the privilege level. For non-conforming CS
4634 selectors, the CS.DPL sets the privilege level the trap/interrupt
4635 handler runs at. For conforming CS selectors, the CPL remains
4636 unchanged, but the CS.DPL must be <= CPL. */
4637 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4638 * when CPU in Ring-0. Result \#GP? */
4639 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4640 {
4641 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4642 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4643 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4644 }
4645
4646
4647 /* Make sure the selector is present. */
4648 if (!DescCS.Legacy.Gen.u1Present)
4649 {
4650 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4651 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4652 }
4653
4654 /* Check that the new RIP is canonical. */
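    /* The 16-byte long mode gate splits the 64-bit handler offset into 16-bit low, 16-bit high and 32-bit top fields. */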
4655 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4656 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4657 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4658 if (!IEM_IS_CANONICAL(uNewRip))
4659 {
4660 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4661 return iemRaiseGeneralProtectionFault0(pVCpu);
4662 }
4663
4664 /*
4665 * If the privilege level changes or if the IST isn't zero, we need to get
4666 * a new stack from the TSS.
4667 */
4668 uint64_t uNewRsp;
4669 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4670 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4671 if ( uNewCpl != pVCpu->iem.s.uCpl
4672 || Idte.Gate.u3IST != 0)
4673 {
4674 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4675 if (rcStrict != VINF_SUCCESS)
4676 return rcStrict;
4677 }
4678 else
4679 uNewRsp = pCtx->rsp;
4680 uNewRsp &= ~(uint64_t)0xf;
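    /* Long mode interrupt delivery aligns the new RSP down to a 16-byte boundary before pushing the frame. */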
4681
4682 /*
4683 * Calc the flag image to push.
4684 */
4685 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4686 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4687 fEfl &= ~X86_EFL_RF;
4688 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4689 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4690
4691 /*
4692 * Start making changes.
4693 */
4694 /* Set the new CPL so that stack accesses use it. */
4695 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4696 pVCpu->iem.s.uCpl = uNewCpl;
4697
4698 /* Create the stack frame. */
4699 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
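    /* Five qwords: RIP, CS, RFLAGS, RSP and SS, plus one more when an error code is pushed. */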
4700 RTPTRUNION uStackFrame;
4701 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4702 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4703 if (rcStrict != VINF_SUCCESS)
4704 return rcStrict;
4705 void * const pvStackFrame = uStackFrame.pv;
4706
4707 if (fFlags & IEM_XCPT_FLAGS_ERR)
4708 *uStackFrame.pu64++ = uErr;
4709 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4710 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4711 uStackFrame.pu64[2] = fEfl;
4712 uStackFrame.pu64[3] = pCtx->rsp;
4713 uStackFrame.pu64[4] = pCtx->ss.Sel;
4714 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4715 if (rcStrict != VINF_SUCCESS)
4716 return rcStrict;
4717
4718 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
4719    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4720 * after pushing the stack frame? (Write protect the gdt + stack to
4721 * find out.) */
4722 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4723 {
4724 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4725 if (rcStrict != VINF_SUCCESS)
4726 return rcStrict;
4727 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4728 }
4729
4730 /*
4731     * Start committing the register changes.
4732 */
4733 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4734 * hidden registers when interrupting 32-bit or 16-bit code! */
4735 if (uNewCpl != uOldCpl)
4736 {
4737 pCtx->ss.Sel = 0 | uNewCpl;
4738 pCtx->ss.ValidSel = 0 | uNewCpl;
4739 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4740 pCtx->ss.u32Limit = UINT32_MAX;
4741 pCtx->ss.u64Base = 0;
4742 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4743 }
4744 pCtx->rsp = uNewRsp - cbStackFrame;
4745 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4746 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4747 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4748 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4749 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4750 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4751 pCtx->rip = uNewRip;
4752
4753 fEfl &= ~fEflToClear;
4754 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4755
4756 if (fFlags & IEM_XCPT_FLAGS_CR2)
4757 pCtx->cr2 = uCr2;
4758
4759 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4760 iemRaiseXcptAdjustState(pCtx, u8Vector);
4761
4762 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4763}
4764
4765
4766/**
4767 * Implements exceptions and interrupts.
4768 *
4769 * All exceptions and interrupts go through this function!
4770 *
4771 * @returns VBox strict status code.
4772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4773 * @param cbInstr The number of bytes to offset rIP by in the return
4774 * address.
4775 * @param u8Vector The interrupt / exception vector number.
4776 * @param fFlags The flags.
4777 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4778 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4779 */
4780DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
4781iemRaiseXcptOrInt(PVMCPU pVCpu,
4782 uint8_t cbInstr,
4783 uint8_t u8Vector,
4784 uint32_t fFlags,
4785 uint16_t uErr,
4786 uint64_t uCr2)
4787{
4788 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4789#ifdef IN_RING0
4790 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
4791 AssertRCReturn(rc, rc);
4792#endif
4793
4794#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4795 /*
4796 * Flush prefetch buffer
4797 */
4798 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
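    /* Truncating cbOpcode to the bytes already consumed discards any prefetched opcode bytes. */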
4799#endif
4800
4801 /*
4802 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4803 */
4804 if ( pCtx->eflags.Bits.u1VM
4805 && pCtx->eflags.Bits.u2IOPL != 3
4806 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4807 && (pCtx->cr0 & X86_CR0_PE) )
4808 {
4809 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4810 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4811 u8Vector = X86_XCPT_GP;
4812 uErr = 0;
4813 }
4814#ifdef DBGFTRACE_ENABLED
4815 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4816 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4817 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
4818#endif
4819
4820 /*
4821 * Do recursion accounting.
4822 */
4823 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4824 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4825 if (pVCpu->iem.s.cXcptRecursions == 0)
4826 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4827 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4828 else
4829 {
4830 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4831 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4832
4833        /** @todo double and triple faults. */
4834 if (pVCpu->iem.s.cXcptRecursions >= 3)
4835 {
4836#ifdef DEBUG_bird
4837 AssertFailed();
4838#endif
4839 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4840 }
4841
4842 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4843 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4844 {
4845 ....
4846 } */
4847 }
4848 pVCpu->iem.s.cXcptRecursions++;
4849 pVCpu->iem.s.uCurXcpt = u8Vector;
4850 pVCpu->iem.s.fCurXcpt = fFlags;
4851
4852 /*
4853 * Extensive logging.
4854 */
4855#if defined(LOG_ENABLED) && defined(IN_RING3)
4856 if (LogIs3Enabled())
4857 {
4858 PVM pVM = pVCpu->CTX_SUFF(pVM);
4859 char szRegs[4096];
4860 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4861 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4862 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4863 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4864 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4865 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4866 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4867 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4868 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4869 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4870 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4871 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4872 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4873 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4874 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4875 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4876 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4877 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4878 " efer=%016VR{efer}\n"
4879 " pat=%016VR{pat}\n"
4880 " sf_mask=%016VR{sf_mask}\n"
4881 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4882 " lstar=%016VR{lstar}\n"
4883 " star=%016VR{star} cstar=%016VR{cstar}\n"
4884 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4885 );
4886
4887 char szInstr[256];
4888 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4889 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4890 szInstr, sizeof(szInstr), NULL);
4891 Log3(("%s%s\n", szRegs, szInstr));
4892 }
4893#endif /* LOG_ENABLED */
4894
4895 /*
4896 * Call the mode specific worker function.
4897 */
4898 VBOXSTRICTRC rcStrict;
4899 if (!(pCtx->cr0 & X86_CR0_PE))
4900 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4901 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4902 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4903 else
4904 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4905
4906 /* Flush the prefetch buffer. */
4907#ifdef IEM_WITH_CODE_TLB
4908 pVCpu->iem.s.pbInstrBuf = NULL;
4909#else
4910 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4911#endif
4912
4913 /*
4914 * Unwind.
4915 */
4916 pVCpu->iem.s.cXcptRecursions--;
4917 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4918 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4919 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4920 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
4921 return rcStrict;
4922}
4923
4924#ifdef IEM_WITH_SETJMP
4925/**
4926 * See iemRaiseXcptOrInt. Will not return.
4927 */
4928IEM_STATIC DECL_NO_RETURN(void)
4929iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
4930 uint8_t cbInstr,
4931 uint8_t u8Vector,
4932 uint32_t fFlags,
4933 uint16_t uErr,
4934 uint64_t uCr2)
4935{
4936 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4937 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
4938}
4939#endif
4940
4941
4942/** \#DE - 00. */
4943DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
4944{
4945 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4946}
4947
4948
4949/** \#DB - 01.
4950 * @note This automatically clears DR7.GD. */
4951DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
4952{
4953 /** @todo set/clear RF. */
4954 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
4955 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4956}
4957
4958
4959/** \#UD - 06. */
4960DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
4961{
4962 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4963}
4964
4965
4966/** \#NM - 07. */
4967DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
4968{
4969 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4970}
4971
4972
4973/** \#TS(err) - 0a. */
4974DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
4975{
4976 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4977}
4978
4979
4980/** \#TS(tr) - 0a. */
4981DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
4982{
4983 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4984 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
4985}
4986
4987
4988/** \#TS(0) - 0a. */
4989DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
4990{
4991 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4992 0, 0);
4993}
4994
4995
4996/** \#TS(sel) - 0a. */
4997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
4998{
4999 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5000 uSel & X86_SEL_MASK_OFF_RPL, 0);
5001}
5002
5003
5004/** \#NP(err) - 0b. */
5005DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5006{
5007 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5008}
5009
5010
5011/** \#NP(seg) - 0b. */
5012DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5013{
5014 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5015 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5016}
5017
5018
5019/** \#NP(sel) - 0b. */
5020DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5021{
5022 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5023 uSel & ~X86_SEL_RPL, 0);
5024}
5025
5026
5027/** \#SS(sel) - 0c. */
5028DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5029{
5030 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5031 uSel & ~X86_SEL_RPL, 0);
5032}
5033
5034
5035/** \#SS(err) - 0c. */
5036DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5037{
5038 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5039}
5040
5041
5042/** \#GP(n) - 0d. */
5043DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5044{
5045 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5046}
5047
5048
5049/** \#GP(0) - 0d. */
5050DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5051{
5052 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5053}
5054
5055#ifdef IEM_WITH_SETJMP
5056/** \#GP(0) - 0d. */
5057DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5058{
5059 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5060}
5061#endif
5062
5063
5064/** \#GP(sel) - 0d. */
5065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5066{
5067 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5068 Sel & ~X86_SEL_RPL, 0);
5069}
5070
5071
5072/** \#GP(0) - 0d. */
5073DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5074{
5075 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5076}
5077
5078
5079/** \#GP(sel) - 0d. */
5080DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5081{
5082 NOREF(iSegReg); NOREF(fAccess);
5083 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5084 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5085}
5086
5087#ifdef IEM_WITH_SETJMP
5088/** \#GP(sel) - 0d, longjmp. */
5089DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5090{
5091 NOREF(iSegReg); NOREF(fAccess);
5092 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5093 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5094}
5095#endif
5096
5097/** \#GP(sel) - 0d. */
5098DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5099{
5100 NOREF(Sel);
5101 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5102}
5103
5104#ifdef IEM_WITH_SETJMP
5105/** \#GP(sel) - 0d, longjmp. */
5106DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5107{
5108 NOREF(Sel);
5109 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5110}
5111#endif
5112
5113
5114/** \#GP(sel) - 0d. */
5115DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5116{
5117 NOREF(iSegReg); NOREF(fAccess);
5118 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5119}
5120
5121#ifdef IEM_WITH_SETJMP
5122/** \#GP(sel) - 0d, longjmp. */
5123DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5124 uint32_t fAccess)
5125{
5126 NOREF(iSegReg); NOREF(fAccess);
5127 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5128}
5129#endif
5130
5131
5132/** \#PF(n) - 0e. */
5133DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5134{
5135 uint16_t uErr;
5136 switch (rc)
5137 {
5138 case VERR_PAGE_NOT_PRESENT:
5139 case VERR_PAGE_TABLE_NOT_PRESENT:
5140 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5141 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5142 uErr = 0;
5143 break;
5144
5145 default:
5146 AssertMsgFailed(("%Rrc\n", rc));
5147 case VERR_ACCESS_DENIED:
5148 uErr = X86_TRAP_PF_P;
5149 break;
5150
5151 /** @todo reserved */
5152 }
5153
5154 if (pVCpu->iem.s.uCpl == 3)
5155 uErr |= X86_TRAP_PF_US;
5156
5157 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5158 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5159 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5160 uErr |= X86_TRAP_PF_ID;
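        /* Note: the I/D (instruction fetch) bit is only reported when no-execute paging is enabled, which is what the PAE + EFER.NXE check above mirrors. */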
5161
5162#if 0 /* This is so much nonsense, really. Why was it done like that? */
5163 /* Note! RW access callers reporting a WRITE protection fault, will clear
5164 the READ flag before calling. So, read-modify-write accesses (RW)
5165 can safely be reported as READ faults. */
5166 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5167 uErr |= X86_TRAP_PF_RW;
5168#else
5169 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5170 {
5171 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5172 uErr |= X86_TRAP_PF_RW;
5173 }
5174#endif
5175
5176 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5177 uErr, GCPtrWhere);
5178}
5179
5180
5181/** \#MF(0) - 10. */
5182DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5183{
5184 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5185}
5186
5187
5188/** \#AC(0) - 11. */
5189DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5190{
5191 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5192}
5193
5194
5195/**
5196 * Macro for calling iemCImplRaiseDivideError().
5197 *
5198 * This enables us to add/remove arguments and force different levels of
5199 * inlining as we wish.
5200 *
5201 * @return Strict VBox status code.
5202 */
5203#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5204IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5205{
5206 NOREF(cbInstr);
5207 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5208}
5209
5210
5211/**
5212 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5213 *
5214 * This enables us to add/remove arguments and force different levels of
5215 * inlining as we wish.
5216 *
5217 * @return Strict VBox status code.
5218 */
5219#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5220IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5221{
5222 NOREF(cbInstr);
5223 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5224}
5225
5226
5227/**
5228 * Macro for calling iemCImplRaiseInvalidOpcode().
5229 *
5230 * This enables us to add/remove arguments and force different levels of
5231 * inlining as we wish.
5232 *
5233 * @return Strict VBox status code.
5234 */
5235#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5236IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5237{
5238 NOREF(cbInstr);
5239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5240}
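
/* Illustrative use from an opcode decoder (see the FNIEMOP_UD_STUB macro further down):
 *     return IEMOP_RAISE_INVALID_OPCODE();
 */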
5241
5242
5243/** @} */
5244
5245
5246/*
5247 *
5248 * Helper routines.
5249 * Helper routines.
5250 * Helper routines.
5251 *
5252 */
5253
5254/**
5255 * Recalculates the effective operand size.
5256 *
5257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5258 */
5259IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5260{
5261 switch (pVCpu->iem.s.enmCpuMode)
5262 {
5263 case IEMMODE_16BIT:
5264 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5265 break;
5266 case IEMMODE_32BIT:
5267 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5268 break;
5269 case IEMMODE_64BIT:
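            /* In 64-bit mode REX.W forces 64-bit operands and takes precedence over the 0x66 operand-size prefix. */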
5270 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5271 {
5272 case 0:
5273 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5274 break;
5275 case IEM_OP_PRF_SIZE_OP:
5276 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5277 break;
5278 case IEM_OP_PRF_SIZE_REX_W:
5279 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5280 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5281 break;
5282 }
5283 break;
5284 default:
5285 AssertFailed();
5286 }
5287}
5288
5289
5290/**
5291 * Sets the default operand size to 64-bit and recalculates the effective
5292 * operand size.
5293 *
5294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5295 */
5296IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5297{
5298 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5299 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5300 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5301 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5302 else
5303 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5304}
5305
5306
5307/*
5308 *
5309 * Common opcode decoders.
5310 * Common opcode decoders.
5311 * Common opcode decoders.
5312 *
5313 */
5314//#include <iprt/mem.h>
5315
5316/**
5317 * Used to add extra details about a stub case.
5318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5319 */
5320IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5321{
5322#if defined(LOG_ENABLED) && defined(IN_RING3)
5323 PVM pVM = pVCpu->CTX_SUFF(pVM);
5324 char szRegs[4096];
5325 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5326 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5327 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5328 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5329 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5330 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5331 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5332 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5333 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5334 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5335 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5336 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5337 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5338 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5339 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5340 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5341 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5342 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5343 " efer=%016VR{efer}\n"
5344 " pat=%016VR{pat}\n"
5345 " sf_mask=%016VR{sf_mask}\n"
5346 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5347 " lstar=%016VR{lstar}\n"
5348 " star=%016VR{star} cstar=%016VR{cstar}\n"
5349 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5350 );
5351
5352 char szInstr[256];
5353 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5354 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5355 szInstr, sizeof(szInstr), NULL);
5356
5357 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5358#else
5359 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5360#endif
5361}
5362
5363/**
5364 * Complains about a stub.
5365 *
5366 * Providing two versions of this macro, one for daily use and one for use when
5367 * working on IEM.
5368 */
5369#if 0
5370# define IEMOP_BITCH_ABOUT_STUB() \
5371 do { \
5372 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5373 iemOpStubMsg2(pVCpu); \
5374 RTAssertPanic(); \
5375 } while (0)
5376#else
5377# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5378#endif
5379
5380/** Stubs an opcode. */
5381#define FNIEMOP_STUB(a_Name) \
5382 FNIEMOP_DEF(a_Name) \
5383 { \
5384 IEMOP_BITCH_ABOUT_STUB(); \
5385 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5386 } \
5387 typedef int ignore_semicolon
5388
5389/** Stubs an opcode. */
5390#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5391 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5392 { \
5393 IEMOP_BITCH_ABOUT_STUB(); \
5394 NOREF(a_Name0); \
5395 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5396 } \
5397 typedef int ignore_semicolon
5398
5399/** Stubs an opcode which currently should raise \#UD. */
5400#define FNIEMOP_UD_STUB(a_Name) \
5401 FNIEMOP_DEF(a_Name) \
5402 { \
5403 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5404 return IEMOP_RAISE_INVALID_OPCODE(); \
5405 } \
5406 typedef int ignore_semicolon
5407
5408/** Stubs an opcode which currently should raise \#UD. */
5409#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5410 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5411 { \
5412 NOREF(a_Name0); \
5413 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5414 return IEMOP_RAISE_INVALID_OPCODE(); \
5415 } \
5416 typedef int ignore_semicolon
5417
5418
5419
5420/** @name Register Access.
5421 * @{
5422 */
5423
5424/**
5425 * Gets a reference (pointer) to the specified hidden segment register.
5426 *
5427 * @returns Hidden register reference.
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 * @param iSegReg The segment register.
5430 */
5431IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5432{
5433 Assert(iSegReg < X86_SREG_COUNT);
5434 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5435 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5436
5437#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5438 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5439 { /* likely */ }
5440 else
5441 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5442#else
5443 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5444#endif
5445 return pSReg;
5446}
5447
5448
5449/**
5450 * Ensures that the given hidden segment register is up to date.
5451 *
5452 * @returns Hidden register reference.
5453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5454 * @param pSReg The segment register.
5455 */
5456IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5457{
5458#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5459 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5460 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5461#else
5462 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5463 NOREF(pVCpu);
5464#endif
5465 return pSReg;
5466}
5467
5468
5469/**
5470 * Gets a reference (pointer) to the specified segment register (the selector
5471 * value).
5472 *
5473 * @returns Pointer to the selector variable.
5474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5475 * @param iSegReg The segment register.
5476 */
5477DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5478{
5479 Assert(iSegReg < X86_SREG_COUNT);
5480 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5481 return &pCtx->aSRegs[iSegReg].Sel;
5482}
5483
5484
5485/**
5486 * Fetches the selector value of a segment register.
5487 *
5488 * @returns The selector value.
5489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5490 * @param iSegReg The segment register.
5491 */
5492DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5493{
5494 Assert(iSegReg < X86_SREG_COUNT);
5495 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5496}
5497
5498
5499/**
5500 * Gets a reference (pointer) to the specified general purpose register.
5501 *
5502 * @returns Register reference.
5503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5504 * @param iReg The general purpose register.
5505 */
5506DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5507{
5508 Assert(iReg < 16);
5509 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5510 return &pCtx->aGRegs[iReg];
5511}
5512
5513
5514/**
5515 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5516 *
5517 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5518 *
5519 * @returns Register reference.
5520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5521 * @param iReg The register.
5522 */
5523DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5524{
5525 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5526 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5527 {
5528 Assert(iReg < 16);
5529 return &pCtx->aGRegs[iReg].u8;
5530 }
5531 /* high 8-bit register. */
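    /* Without a REX prefix, register indices 4-7 encode AH/CH/DH/BH, i.e. bits 15:8 of eAX/eCX/eDX/eBX. */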
5532 Assert(iReg < 8);
5533 return &pCtx->aGRegs[iReg & 3].bHi;
5534}
5535
5536
5537/**
5538 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5539 *
5540 * @returns Register reference.
5541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5542 * @param iReg The register.
5543 */
5544DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5545{
5546 Assert(iReg < 16);
5547 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5548 return &pCtx->aGRegs[iReg].u16;
5549}
5550
5551
5552/**
5553 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5554 *
5555 * @returns Register reference.
5556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5557 * @param iReg The register.
5558 */
5559DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5560{
5561 Assert(iReg < 16);
5562 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5563 return &pCtx->aGRegs[iReg].u32;
5564}
5565
5566
5567/**
5568 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5569 *
5570 * @returns Register reference.
5571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5572 * @param iReg The register.
5573 */
5574DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5575{
5576    Assert(iReg < 16);
5577 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5578 return &pCtx->aGRegs[iReg].u64;
5579}
5580
5581
5582/**
5583 * Fetches the value of an 8-bit general purpose register.
5584 *
5585 * @returns The register value.
5586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5587 * @param iReg The register.
5588 */
5589DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5590{
5591 return *iemGRegRefU8(pVCpu, iReg);
5592}
5593
5594
5595/**
5596 * Fetches the value of a 16-bit general purpose register.
5597 *
5598 * @returns The register value.
5599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5600 * @param iReg The register.
5601 */
5602DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5603{
5604 Assert(iReg < 16);
5605 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5606}
5607
5608
5609/**
5610 * Fetches the value of a 32-bit general purpose register.
5611 *
5612 * @returns The register value.
5613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5614 * @param iReg The register.
5615 */
5616DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5617{
5618 Assert(iReg < 16);
5619 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5620}
5621
5622
5623/**
5624 * Fetches the value of a 64-bit general purpose register.
5625 *
5626 * @returns The register value.
5627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5628 * @param iReg The register.
5629 */
5630DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5631{
5632 Assert(iReg < 16);
5633 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5634}
5635
5636
5637/**
5638 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5639 *
5640 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5641 * segment limit.
5642 *
5643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5644 * @param offNextInstr The offset of the next instruction.
5645 */
5646IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5647{
5648 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5649 switch (pVCpu->iem.s.enmEffOpSize)
5650 {
5651 case IEMMODE_16BIT:
5652 {
5653 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5654 if ( uNewIp > pCtx->cs.u32Limit
5655 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5656 return iemRaiseGeneralProtectionFault0(pVCpu);
5657 pCtx->rip = uNewIp;
5658 break;
5659 }
5660
5661 case IEMMODE_32BIT:
5662 {
5663 Assert(pCtx->rip <= UINT32_MAX);
5664 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5665
5666 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5667 if (uNewEip > pCtx->cs.u32Limit)
5668 return iemRaiseGeneralProtectionFault0(pVCpu);
5669 pCtx->rip = uNewEip;
5670 break;
5671 }
5672
5673 case IEMMODE_64BIT:
5674 {
5675 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5676
5677 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5678 if (!IEM_IS_CANONICAL(uNewRip))
5679 return iemRaiseGeneralProtectionFault0(pVCpu);
5680 pCtx->rip = uNewRip;
5681 break;
5682 }
5683
5684 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5685 }
5686
5687 pCtx->eflags.Bits.u1RF = 0;
5688
5689#ifndef IEM_WITH_CODE_TLB
5690 /* Flush the prefetch buffer. */
5691 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5692#endif
5693
5694 return VINF_SUCCESS;
5695}
5696
5697
5698/**
5699 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5700 *
5701 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5702 * segment limit.
5703 *
5704 * @returns Strict VBox status code.
5705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5706 * @param offNextInstr The offset of the next instruction.
5707 */
5708IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5709{
5710 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5711 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5712
5713 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5714 if ( uNewIp > pCtx->cs.u32Limit
5715 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5716 return iemRaiseGeneralProtectionFault0(pVCpu);
5717 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5718 pCtx->rip = uNewIp;
5719 pCtx->eflags.Bits.u1RF = 0;
5720
5721#ifndef IEM_WITH_CODE_TLB
5722 /* Flush the prefetch buffer. */
5723 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5724#endif
5725
5726 return VINF_SUCCESS;
5727}
5728
5729
5730/**
5731 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5732 *
5733 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5734 * segment limit.
5735 *
5736 * @returns Strict VBox status code.
5737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5738 * @param offNextInstr The offset of the next instruction.
5739 */
5740IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5741{
5742 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5743 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5744
5745 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5746 {
5747 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5748
5749 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5750 if (uNewEip > pCtx->cs.u32Limit)
5751 return iemRaiseGeneralProtectionFault0(pVCpu);
5752 pCtx->rip = uNewEip;
5753 }
5754 else
5755 {
5756 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5757
5758 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5759 if (!IEM_IS_CANONICAL(uNewRip))
5760 return iemRaiseGeneralProtectionFault0(pVCpu);
5761 pCtx->rip = uNewRip;
5762 }
5763 pCtx->eflags.Bits.u1RF = 0;
5764
5765#ifndef IEM_WITH_CODE_TLB
5766 /* Flush the prefetch buffer. */
5767 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5768#endif
5769
5770 return VINF_SUCCESS;
5771}
5772
5773
5774/**
5775 * Performs a near jump to the specified address.
5776 *
5777 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5778 * segment limit.
5779 *
5780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5781 * @param uNewRip The new RIP value.
5782 */
5783IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
5784{
5785 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5786 switch (pVCpu->iem.s.enmEffOpSize)
5787 {
5788 case IEMMODE_16BIT:
5789 {
5790 Assert(uNewRip <= UINT16_MAX);
5791 if ( uNewRip > pCtx->cs.u32Limit
5792 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5793 return iemRaiseGeneralProtectionFault0(pVCpu);
5794 /** @todo Test 16-bit jump in 64-bit mode. */
5795 pCtx->rip = uNewRip;
5796 break;
5797 }
5798
5799 case IEMMODE_32BIT:
5800 {
5801 Assert(uNewRip <= UINT32_MAX);
5802 Assert(pCtx->rip <= UINT32_MAX);
5803 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5804
5805 if (uNewRip > pCtx->cs.u32Limit)
5806 return iemRaiseGeneralProtectionFault0(pVCpu);
5807 pCtx->rip = uNewRip;
5808 break;
5809 }
5810
5811 case IEMMODE_64BIT:
5812 {
5813 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5814
5815 if (!IEM_IS_CANONICAL(uNewRip))
5816 return iemRaiseGeneralProtectionFault0(pVCpu);
5817 pCtx->rip = uNewRip;
5818 break;
5819 }
5820
5821 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5822 }
5823
5824 pCtx->eflags.Bits.u1RF = 0;
5825
5826#ifndef IEM_WITH_CODE_TLB
5827 /* Flush the prefetch buffer. */
5828 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5829#endif
5830
5831 return VINF_SUCCESS;
5832}
5833
5834
5835/**
5836 * Get the address of the top of the stack.
5837 *
5838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5839 * @param pCtx The CPU context which SP/ESP/RSP should be
5840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5841 * @param pCtx The CPU context from which SP/ESP/RSP should be
 *                          read.
5842DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
5843{
5844 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5845 return pCtx->rsp;
5846 if (pCtx->ss.Attr.n.u1DefBig)
5847 return pCtx->esp;
5848 return pCtx->sp;
5849}
5850
5851
5852/**
5853 * Updates the RIP/EIP/IP to point to the next instruction.
5854 *
5855 * This function leaves the EFLAGS.RF flag alone.
5856 *
5857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5858 * @param cbInstr The number of bytes to add.
5859 */
5860IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
5861{
5862 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5863 switch (pVCpu->iem.s.enmCpuMode)
5864 {
5865 case IEMMODE_16BIT:
5866 Assert(pCtx->rip <= UINT16_MAX);
5867 pCtx->eip += cbInstr;
5868 pCtx->eip &= UINT32_C(0xffff);
5869 break;
5870
5871 case IEMMODE_32BIT:
5872 pCtx->eip += cbInstr;
5873 Assert(pCtx->rip <= UINT32_MAX);
5874 break;
5875
5876 case IEMMODE_64BIT:
5877 pCtx->rip += cbInstr;
5878 break;
5879 default: AssertFailed();
5880 }
5881}
5882
5883
5884#if 0
5885/**
5886 * Updates the RIP/EIP/IP to point to the next instruction.
5887 *
5888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5889 */
5890IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
5891{
5892 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
5893}
5894#endif
5895
5896
5897
5898/**
5899 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
5900 *
5901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5902 * @param cbInstr The number of bytes to add.
5903 */
5904IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
5905{
5906 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5907
5908 pCtx->eflags.Bits.u1RF = 0;
5909
5910 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
5911#if ARCH_BITS >= 64
5912 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
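    /* Indexed by CPU mode: IP wraps at 16 bits, EIP at 32 bits, while RIP uses the full 64 bits. */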
5913 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
5914 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
5915#else
5916 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5917 pCtx->rip += cbInstr;
5918 else
5919 {
5920 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
5921 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
5922 }
5923#endif
5924}
5925
5926
5927/**
5928 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
5929 *
5930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5931 */
5932IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
5933{
5934 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
5935}
5936
5937
5938/**
5939 * Adds to the stack pointer.
5940 *
5941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5942 * @param pCtx The CPU context in which SP/ESP/RSP should be
5943 * updated.
5944 * @param cbToAdd The number of bytes to add (8-bit!).
5945 */
5946DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5947{
5948 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5949 pCtx->rsp += cbToAdd;
5950 else if (pCtx->ss.Attr.n.u1DefBig)
5951 pCtx->esp += cbToAdd;
5952 else
5953 pCtx->sp += cbToAdd;
5954}
5955
5956
5957/**
5958 * Subtracts from the stack pointer.
5959 *
5960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5961 * @param pCtx The CPU context in which SP/ESP/RSP should be
5962 * updated.
5963 * @param cbToSub The number of bytes to subtract (8-bit!).
5964 */
5965DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5966{
5967 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5968 pCtx->rsp -= cbToSub;
5969 else if (pCtx->ss.Attr.n.u1DefBig)
5970 pCtx->esp -= cbToSub;
5971 else
5972 pCtx->sp -= cbToSub;
5973}
5974
5975
5976/**
5977 * Adds to the temporary stack pointer.
5978 *
5979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5980 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5981 * @param cbToAdd The number of bytes to add (16-bit).
5982 * @param pCtx Where to get the current stack mode.
5983 */
5984DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5985{
5986 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5987 pTmpRsp->u += cbToAdd;
5988 else if (pCtx->ss.Attr.n.u1DefBig)
5989 pTmpRsp->DWords.dw0 += cbToAdd;
5990 else
5991 pTmpRsp->Words.w0 += cbToAdd;
5992}
5993
5994
5995/**
5996 * Subtracts from the temporary stack pointer.
5997 *
5998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5999 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6000 * @param cbToSub The number of bytes to subtract.
6001 * @param pCtx Where to get the current stack mode.
6002 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6003 * expecting that.
6004 */
6005DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6006{
6007 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6008 pTmpRsp->u -= cbToSub;
6009 else if (pCtx->ss.Attr.n.u1DefBig)
6010 pTmpRsp->DWords.dw0 -= cbToSub;
6011 else
6012 pTmpRsp->Words.w0 -= cbToSub;
6013}
6014
6015
6016/**
6017 * Calculates the effective stack address for a push of the specified size as
6018 * well as the new RSP value (upper bits may be masked).
6019 *
6020 * @returns Effective stack address for the push.
6021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6022 * @param pCtx Where to get the current stack mode.
6023 * @param cbItem The size of the stack item to push.
6024 * @param puNewRsp Where to return the new RSP value.
6025 */
6026DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6027{
6028 RTUINT64U uTmpRsp;
6029 RTGCPTR GCPtrTop;
6030 uTmpRsp.u = pCtx->rsp;
6031
6032 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6033 GCPtrTop = uTmpRsp.u -= cbItem;
6034 else if (pCtx->ss.Attr.n.u1DefBig)
6035 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6036 else
6037 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6038 *puNewRsp = uTmpRsp.u;
6039 return GCPtrTop;
6040}
6041
6042
6043/**
6044 * Gets the current stack pointer and calculates the value after a pop of the
6045 * specified size.
6046 *
6047 * @returns Current stack pointer.
6048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6049 * @param pCtx Where to get the current stack mode.
6050 * @param cbItem The size of the stack item to pop.
6051 * @param puNewRsp Where to return the new RSP value.
6052 */
6053DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6054{
6055 RTUINT64U uTmpRsp;
6056 RTGCPTR GCPtrTop;
6057 uTmpRsp.u = pCtx->rsp;
6058
6059 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6060 {
6061 GCPtrTop = uTmpRsp.u;
6062 uTmpRsp.u += cbItem;
6063 }
6064 else if (pCtx->ss.Attr.n.u1DefBig)
6065 {
6066 GCPtrTop = uTmpRsp.DWords.dw0;
6067 uTmpRsp.DWords.dw0 += cbItem;
6068 }
6069 else
6070 {
6071 GCPtrTop = uTmpRsp.Words.w0;
6072 uTmpRsp.Words.w0 += cbItem;
6073 }
6074 *puNewRsp = uTmpRsp.u;
6075 return GCPtrTop;
6076}
6077
6078
6079/**
6080 * Calculates the effective stack address for a push of the specified size as
6081 * well as the new temporary RSP value (upper bits may be masked).
6082 *
6083 * @returns Effective stack address for the push.
6084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6085 * @param pCtx Where to get the current stack mode.
6086 * @param pTmpRsp The temporary stack pointer. This is updated.
6087 * @param cbItem The size of the stack item to push.
6088 */
6089DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6090{
6091 RTGCPTR GCPtrTop;
6092
6093 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6094 GCPtrTop = pTmpRsp->u -= cbItem;
6095 else if (pCtx->ss.Attr.n.u1DefBig)
6096 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6097 else
6098 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6099 return GCPtrTop;
6100}
6101
6102
6103/**
6104 * Gets the effective stack address for a pop of the specified size and
6105 * calculates and updates the temporary RSP.
6106 *
6107 * @returns Current stack pointer.
6108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6109 * @param pCtx Where to get the current stack mode.
6110 * @param pTmpRsp The temporary stack pointer. This is updated.
6111 * @param cbItem The size of the stack item to pop.
6112 */
6113DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6114{
6115 RTGCPTR GCPtrTop;
6116 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6117 {
6118 GCPtrTop = pTmpRsp->u;
6119 pTmpRsp->u += cbItem;
6120 }
6121 else if (pCtx->ss.Attr.n.u1DefBig)
6122 {
6123 GCPtrTop = pTmpRsp->DWords.dw0;
6124 pTmpRsp->DWords.dw0 += cbItem;
6125 }
6126 else
6127 {
6128 GCPtrTop = pTmpRsp->Words.w0;
6129 pTmpRsp->Words.w0 += cbItem;
6130 }
6131 return GCPtrTop;
6132}
6133
6134/** @} */
6135
6136
6137/** @name FPU access and helpers.
6138 *
6139 * @{
6140 */
6141
6142
6143/**
6144 * Hook for preparing to use the host FPU.
6145 *
6146 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6147 *
6148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6149 */
6150DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6151{
6152#ifdef IN_RING3
6153 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6154#else
6155 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6156#endif
6157}
6158
6159
6160/**
6161 * Hook for preparing to use the host FPU for SSE.
6162 *
6163 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6164 *
6165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6166 */
6167DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6168{
6169 iemFpuPrepareUsage(pVCpu);
6170}
6171
6172
6173/**
6174 * Hook for actualizing the guest FPU state before the interpreter reads it.
6175 *
6176 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6177 *
6178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6179 */
6180DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6181{
6182#ifdef IN_RING3
6183 NOREF(pVCpu);
6184#else
6185 CPUMRZFpuStateActualizeForRead(pVCpu);
6186#endif
6187}
6188
6189
6190/**
6191 * Hook for actualizing the guest FPU state before the interpreter changes it.
6192 *
6193 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6194 *
6195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6196 */
6197DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6198{
6199#ifdef IN_RING3
6200 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6201#else
6202 CPUMRZFpuStateActualizeForChange(pVCpu);
6203#endif
6204}
6205
6206
6207/**
6208 * Hook for actualizing the guest XMM0..15 register state for read only.
6209 *
6210 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6211 *
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 */
6214DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6215{
6216#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6217 NOREF(pVCpu);
6218#else
6219 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6220#endif
6221}
6222
6223
6224/**
6225 * Hook for actualizing the guest XMM0..15 register state for read+write.
6226 *
6227 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6228 *
6229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6230 */
6231DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6232{
6233#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6234 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6235#else
6236 CPUMRZFpuStateActualizeForChange(pVCpu);
6237#endif
6238}
6239
6240
6241/**
6242 * Stores a QNaN value into a FPU register.
6243 *
6244 * @param pReg Pointer to the register.
6245 */
6246DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6247{
6248 pReg->au32[0] = UINT32_C(0x00000000);
6249 pReg->au32[1] = UINT32_C(0xc0000000);
6250 pReg->au16[4] = UINT16_C(0xffff);
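    /* Together: sign=1, exponent=0x7fff, significand=0xC000'0000'0000'0000, i.e. the QNaN "real indefinite" value. */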
6251}
6252
6253
6254/**
6255 * Updates the FOP, FPU.CS and FPUIP registers.
6256 *
6257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6258 * @param pCtx The CPU context.
6259 * @param pFpuCtx The FPU context.
6260 */
6261DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6262{
6263 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6264 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6265    /** @todo x87.CS and FPUIP need to be kept separately. */
6266 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6267 {
6268 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6269 * happens in real mode here based on the fnsave and fnstenv images. */
6270 pFpuCtx->CS = 0;
6271 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6272 }
6273 else
6274 {
6275 pFpuCtx->CS = pCtx->cs.Sel;
6276 pFpuCtx->FPUIP = pCtx->rip;
6277 }
6278}
6279
6280
6281/**
6282 * Updates the x87.DS and FPUDP registers.
6283 *
6284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6285 * @param pCtx The CPU context.
6286 * @param pFpuCtx The FPU context.
6287 * @param iEffSeg The effective segment register.
6288 * @param GCPtrEff The effective address relative to @a iEffSeg.
6289 */
6290DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6291{
6292 RTSEL sel;
6293 switch (iEffSeg)
6294 {
6295 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6296 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6297 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6298 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6299 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6300 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6301 default:
6302 AssertMsgFailed(("%d\n", iEffSeg));
6303 sel = pCtx->ds.Sel;
6304 }
6305    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6306 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6307 {
6308 pFpuCtx->DS = 0;
6309 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6310 }
6311 else
6312 {
6313 pFpuCtx->DS = sel;
6314 pFpuCtx->FPUDP = GCPtrEff;
6315 }
6316}
6317
6318
6319/**
6320 * Rotates the stack registers in the push direction.
6321 *
6322 * @param pFpuCtx The FPU context.
6323 * @remarks This is a complete waste of time, but fxsave stores the registers in
6324 * stack order.
6325 */
6326DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6327{
6328 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6329 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6330 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6331 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6332 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6333 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6334 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6335 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6336 pFpuCtx->aRegs[0].r80 = r80Tmp;
6337}
6338
6339
6340/**
6341 * Rotates the stack registers in the pop direction.
6342 *
6343 * @param pFpuCtx The FPU context.
6344 * @remarks This is a complete waste of time, but fxsave stores the registers in
6345 * stack order.
6346 */
6347DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6348{
6349 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6350 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6351 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6352 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6353 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6354 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6355 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6356 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6357 pFpuCtx->aRegs[7].r80 = r80Tmp;
6358}
6359
6360
6361/**
6362 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6363 * exception prevents it.
6364 *
6365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6366 * @param pResult The FPU operation result to push.
6367 * @param pFpuCtx The FPU context.
6368 */
6369IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6370{
6371 /* Update FSW and bail if there are pending exceptions afterwards. */
6372 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6373 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6374 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6375 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6376 {
6377 pFpuCtx->FSW = fFsw;
6378 return;
6379 }
6380
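/* A push decrements TOP: adding 7 under the 3-bit TOP mask is the same as subtracting 1 modulo 8. */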
6381 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6382 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6383 {
6384 /* All is fine, push the actual value. */
6385 pFpuCtx->FTW |= RT_BIT(iNewTop);
6386 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6387 }
6388 else if (pFpuCtx->FCW & X86_FCW_IM)
6389 {
6390 /* Masked stack overflow, push QNaN. */
6391 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6392 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6393 }
6394 else
6395 {
6396 /* Raise stack overflow, don't push anything. */
6397 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6398 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6399 return;
6400 }
6401
6402 fFsw &= ~X86_FSW_TOP_MASK;
6403 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6404 pFpuCtx->FSW = fFsw;
6405
6406 iemFpuRotateStackPush(pFpuCtx);
6407}
6408
6409
6410/**
6411 * Stores a result in a FPU register and updates the FSW and FTW.
6412 *
6413 * @param pFpuCtx The FPU context.
6414 * @param pResult The result to store.
6415 * @param iStReg Which FPU register to store it in.
6416 */
6417IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6418{
6419 Assert(iStReg < 8);
6420 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6421 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6422 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6423 pFpuCtx->FTW |= RT_BIT(iReg);
6424 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6425}
6426
6427
6428/**
6429 * Only updates the FPU status word (FSW) with the result of the current
6430 * instruction.
6431 *
6432 * @param pFpuCtx The FPU context.
6433 * @param u16FSW The FSW output of the current instruction.
6434 */
6435IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6436{
6437 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6438 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6439}
6440
6441
6442/**
6443 * Pops one item off the FPU stack if no pending exception prevents it.
6444 *
6445 * @param pFpuCtx The FPU context.
6446 */
6447IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6448{
6449 /* Check pending exceptions. */
6450 uint16_t uFSW = pFpuCtx->FSW;
6451 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6452 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6453 return;
6454
6455 /* TOP++ (pop): adding 9 under the TOP mask increments TOP by one modulo 8. */
6456 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6457 uFSW &= ~X86_FSW_TOP_MASK;
6458 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6459 pFpuCtx->FSW = uFSW;
6460
6461 /* Mark the previous ST0 as empty. */
6462 iOldTop >>= X86_FSW_TOP_SHIFT;
6463 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6464
6465 /* Rotate the registers. */
6466 iemFpuRotateStackPop(pFpuCtx);
6467}
6468
6469
6470/**
6471 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6472 *
6473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6474 * @param pResult The FPU operation result to push.
6475 */
6476IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6477{
6478 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6479 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6480 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6481 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6482}
6483
6484
6485/**
6486 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6487 * and sets FPUDP and FPUDS.
6488 *
6489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6490 * @param pResult The FPU operation result to push.
6491 * @param iEffSeg The effective segment register.
6492 * @param GCPtrEff The effective address relative to @a iEffSeg.
6493 */
6494IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6495{
6496 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6497 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6498 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6499 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6500 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6501}
6502
6503
6504/**
6505 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6506 * unless a pending exception prevents it.
6507 *
6508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6509 * @param pResult The FPU operation result to store and push.
6510 */
6511IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6512{
6513 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6514 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6515 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6516
6517 /* Update FSW and bail if there are pending exceptions afterwards. */
6518 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6519 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6520 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6521 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6522 {
6523 pFpuCtx->FSW = fFsw;
6524 return;
6525 }
6526
6527 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6528 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6529 {
6530 /* All is fine, push the actual value. */
6531 pFpuCtx->FTW |= RT_BIT(iNewTop);
6532 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6533 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6534 }
6535 else if (pFpuCtx->FCW & X86_FCW_IM)
6536 {
6537 /* Masked stack overflow, push QNaN. */
6538 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6539 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6540 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6541 }
6542 else
6543 {
6544 /* Raise stack overflow, don't push anything. */
6545 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6546 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6547 return;
6548 }
6549
6550 fFsw &= ~X86_FSW_TOP_MASK;
6551 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6552 pFpuCtx->FSW = fFsw;
6553
6554 iemFpuRotateStackPush(pFpuCtx);
6555}
6556
6557
6558/**
6559 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6560 * FOP.
6561 *
6562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6563 * @param pResult The result to store.
6564 * @param iStReg Which FPU register to store it in.
6565 */
6566IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6567{
6568 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6569 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6571 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6572}
6573
6574
6575/**
6576 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6577 * FOP, and then pops the stack.
6578 *
6579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6580 * @param pResult The result to store.
6581 * @param iStReg Which FPU register to store it in.
6582 */
6583IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6584{
6585 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6586 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6587 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6588 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6589 iemFpuMaybePopOne(pFpuCtx);
6590}
6591
6592
6593/**
6594 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6595 * FPUDP, and FPUDS.
6596 *
6597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6598 * @param pResult The result to store.
6599 * @param iStReg Which FPU register to store it in.
6600 * @param iEffSeg The effective memory operand selector register.
6601 * @param GCPtrEff The effective memory operand offset.
6602 */
6603IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6604 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6605{
6606 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6607 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6608 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6609 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6610 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6611}
6612
6613
6614/**
6615 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6616 * FPUDP, and FPUDS, and then pops the stack.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param pResult The result to store.
6620 * @param iStReg Which FPU register to store it in.
6621 * @param iEffSeg The effective memory operand selector register.
6622 * @param GCPtrEff The effective memory operand offset.
6623 */
6624IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6625 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6626{
6627 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6628 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6629 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6630 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6631 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6632 iemFpuMaybePopOne(pFpuCtx);
6633}
6634
6635
6636/**
6637 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6638 *
6639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6640 */
6641IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6642{
6643 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6644 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6646}
6647
6648
6649/**
6650 * Marks the specified stack register as free (for FFREE).
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 * @param iStReg The register to free.
6654 */
6655IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6656{
6657 Assert(iStReg < 8);
6658 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6659 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6660 pFpuCtx->FTW &= ~RT_BIT(iReg);
6661}
6662
6663
6664/**
6665 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6666 *
6667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6668 */
6669IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6670{
6671 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6672 uint16_t uFsw = pFpuCtx->FSW;
6673 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6674 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6675 uFsw &= ~X86_FSW_TOP_MASK;
6676 uFsw |= uTop;
6677 pFpuCtx->FSW = uFsw;
6678}
6679
6680
6681/**
6682 * Decrements FSW.TOP, i.e. pushes a new item onto the stack without storing anything.
6683 *
6684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6685 */
6686IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6687{
6688 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6689 uint16_t uFsw = pFpuCtx->FSW;
6690 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
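/* Adding 7 under the TOP mask decrements TOP by one modulo 8 (push direction). */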
6691 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6692 uFsw &= ~X86_FSW_TOP_MASK;
6693 uFsw |= uTop;
6694 pFpuCtx->FSW = uFsw;
6695}
6696
6697
6698/**
6699 * Updates the FSW, FOP, FPUIP, and FPUCS.
6700 *
6701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6702 * @param u16FSW The FSW from the current instruction.
6703 */
6704IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6705{
6706 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6707 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6708 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6709 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6710}
6711
6712
6713/**
6714 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param u16FSW The FSW from the current instruction.
6718 */
6719IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6720{
6721 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6722 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6723 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6724 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6725 iemFpuMaybePopOne(pFpuCtx);
6726}
6727
6728
6729/**
6730 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6731 *
6732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6733 * @param u16FSW The FSW from the current instruction.
6734 * @param iEffSeg The effective memory operand selector register.
6735 * @param GCPtrEff The effective memory operand offset.
6736 */
6737IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6738{
6739 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6740 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6741 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6742 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6743 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6744}
6745
6746
6747/**
6748 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6749 *
6750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6751 * @param u16FSW The FSW from the current instruction.
6752 */
6753IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
6754{
6755 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6756 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6758 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6759 iemFpuMaybePopOne(pFpuCtx);
6760 iemFpuMaybePopOne(pFpuCtx);
6761}
6762
6763
6764/**
6765 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
6766 *
6767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6768 * @param u16FSW The FSW from the current instruction.
6769 * @param iEffSeg The effective memory operand selector register.
6770 * @param GCPtrEff The effective memory operand offset.
6771 */
6772IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6773{
6774 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6775 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6776 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6777 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6778 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6779 iemFpuMaybePopOne(pFpuCtx);
6780}
6781
6782
6783/**
6784 * Worker routine for raising an FPU stack underflow exception.
6785 *
6786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6787 * @param pFpuCtx The FPU context.
6788 * @param iStReg The stack register being accessed.
6789 */
6790IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
6791{
6792 Assert(iStReg < 8 || iStReg == UINT8_MAX);
6793 if (pFpuCtx->FCW & X86_FCW_IM)
6794 {
6795 /* Masked underflow. */
6796 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6797 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6798 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6799 if (iStReg != UINT8_MAX)
6800 {
6801 pFpuCtx->FTW |= RT_BIT(iReg);
6802 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6803 }
6804 }
6805 else
6806 {
6807 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6808 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6809 }
6810}
6811
6812
6813/**
6814 * Raises a FPU stack underflow exception.
6815 *
6816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6817 * @param iStReg The destination register that should be loaded
6818 * with QNaN if \#IS is masked. Specify
6819 * UINT8_MAX if none (like for fcom).
6820 */
6821DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
6822{
6823 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6824 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6825 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6826 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6827}
6828
6829
6830DECL_NO_INLINE(IEM_STATIC, void)
6831iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6832{
6833 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6834 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6835 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6836 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6837 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6838}
6839
6840
6841DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
6842{
6843 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6844 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6845 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6846 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6847 iemFpuMaybePopOne(pFpuCtx);
6848}
6849
6850
6851DECL_NO_INLINE(IEM_STATIC, void)
6852iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6853{
6854 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6855 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6856 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6857 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6858 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6859 iemFpuMaybePopOne(pFpuCtx);
6860}
6861
6862
6863DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
6864{
6865 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6866 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6867 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6868 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
6869 iemFpuMaybePopOne(pFpuCtx);
6870 iemFpuMaybePopOne(pFpuCtx);
6871}
6872
6873
6874DECL_NO_INLINE(IEM_STATIC, void)
6875iemFpuStackPushUnderflow(PVMCPU pVCpu)
6876{
6877 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6878 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6879 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6880
6881 if (pFpuCtx->FCW & X86_FCW_IM)
6882 {
6883 /* Masked underflow - push QNaN. */
6884 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6885 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6886 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6887 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6888 pFpuCtx->FTW |= RT_BIT(iNewTop);
6889 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6890 iemFpuRotateStackPush(pFpuCtx);
6891 }
6892 else
6893 {
6894 /* Exception pending - don't change TOP or the register stack. */
6895 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6896 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6897 }
6898}
6899
6900
6901DECL_NO_INLINE(IEM_STATIC, void)
6902iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
6903{
6904 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6905 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6906 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6907
6908 if (pFpuCtx->FCW & X86_FCW_IM)
6909 {
6910 /* Masked underflow - push QNaN. */
6911 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6912 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6913 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6914 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6915 pFpuCtx->FTW |= RT_BIT(iNewTop);
6916 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6917 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6918 iemFpuRotateStackPush(pFpuCtx);
6919 }
6920 else
6921 {
6922 /* Exception pending - don't change TOP or the register stack. */
6923 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6924 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6925 }
6926}
6927
6928
6929/**
6930 * Worker routine for raising an FPU stack overflow exception on a push.
6931 *
6932 * @param pFpuCtx The FPU context.
6933 */
6934IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
6935{
6936 if (pFpuCtx->FCW & X86_FCW_IM)
6937 {
6938 /* Masked overflow. */
6939 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6940 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6941 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6942 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6943 pFpuCtx->FTW |= RT_BIT(iNewTop);
6944 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6945 iemFpuRotateStackPush(pFpuCtx);
6946 }
6947 else
6948 {
6949 /* Exception pending - don't change TOP or the register stack. */
6950 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6951 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6952 }
6953}
6954
6955
6956/**
6957 * Raises a FPU stack overflow exception on a push.
6958 *
6959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6960 */
6961DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
6962{
6963 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6964 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6965 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6966 iemFpuStackPushOverflowOnly(pFpuCtx);
6967}
6968
6969
6970/**
6971 * Raises a FPU stack overflow exception on a push with a memory operand.
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 * @param iEffSeg The effective memory operand selector register.
6975 * @param GCPtrEff The effective memory operand offset.
6976 */
6977DECL_NO_INLINE(IEM_STATIC, void)
6978iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6979{
6980 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6981 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6982 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6983 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6984 iemFpuStackPushOverflowOnly(pFpuCtx);
6985}
6986
6987
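/**
 * Checks whether the given FPU stack register is marked as non-empty in FTW.
 *
 * @returns VINF_SUCCESS if the register is in use, VERR_NOT_FOUND if it is empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP) to check.
 */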
6988IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
6989{
6990 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6991 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6992 if (pFpuCtx->FTW & RT_BIT(iReg))
6993 return VINF_SUCCESS;
6994 return VERR_NOT_FOUND;
6995}
6996
6997
6998IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
6999{
7000 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7001 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7002 if (pFpuCtx->FTW & RT_BIT(iReg))
7003 {
7004 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7005 return VINF_SUCCESS;
7006 }
7007 return VERR_NOT_FOUND;
7008}
7009
7010
7011IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7012 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7013{
7014 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7015 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7016 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7017 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7018 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7019 {
7020 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7021 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7022 return VINF_SUCCESS;
7023 }
7024 return VERR_NOT_FOUND;
7025}
7026
7027
7028IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7029{
7030 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7031 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7032 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7033 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7034 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7035 {
7036 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7037 return VINF_SUCCESS;
7038 }
7039 return VERR_NOT_FOUND;
7040}
7041
7042
7043/**
7044 * Updates the FPU exception status after FCW is changed.
7045 *
7046 * @param pFpuCtx The FPU context.
7047 */
7048IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7049{
7050 uint16_t u16Fsw = pFpuCtx->FSW;
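/* The FCW exception mask bits (IM..PM) occupy the same bit positions as the
   corresponding FSW exception flags, so a simple AND-NOT finds unmasked pending exceptions. */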
7051 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7052 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7053 else
7054 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7055 pFpuCtx->FSW = u16Fsw;
7056}
7057
7058
7059/**
7060 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7061 *
7062 * @returns The full FTW.
7063 * @param pFpuCtx The FPU context.
7064 */
7065IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7066{
7067 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7068 uint16_t u16Ftw = 0;
7069 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
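/* Full tag values: 0 = valid, 1 = zero, 2 = special (NaN, infinity, denormal, unnormal), 3 = empty. */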
7070 for (unsigned iSt = 0; iSt < 8; iSt++)
7071 {
7072 unsigned const iReg = (iSt + iTop) & 7;
7073 if (!(u8Ftw & RT_BIT(iReg)))
7074 u16Ftw |= 3 << (iReg * 2); /* empty */
7075 else
7076 {
7077 uint16_t uTag;
7078 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7079 if (pr80Reg->s.uExponent == 0x7fff)
7080 uTag = 2; /* Exponent is all 1's => Special. */
7081 else if (pr80Reg->s.uExponent == 0x0000)
7082 {
7083 if (pr80Reg->s.u64Mantissa == 0x0000)
7084 uTag = 1; /* All bits are zero => Zero. */
7085 else
7086 uTag = 2; /* Must be special. */
7087 }
7088 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7089 uTag = 0; /* Valid. */
7090 else
7091 uTag = 2; /* Must be special. */
7092
7093 u16Ftw |= uTag << (iReg * 2);
7094 }
7095 }
7096
7097 return u16Ftw;
7098}
7099
7100
7101/**
7102 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7103 *
7104 * @returns The compressed FTW.
7105 * @param u16FullFtw The full FTW to convert.
7106 */
7107IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7108{
7109 uint8_t u8Ftw = 0;
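/* Each 2-bit tag collapses to a single bit: 1 = register in use (tags 0..2), 0 = empty (tag 3). */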
7110 for (unsigned i = 0; i < 8; i++)
7111 {
7112 if ((u16FullFtw & 3) != 3 /*empty*/)
7113 u8Ftw |= RT_BIT(i);
7114 u16FullFtw >>= 2;
7115 }
7116
7117 return u8Ftw;
7118}
7119
7120/** @} */
7121
7122
7123/** @name Memory access.
7124 *
7125 * @{
7126 */
7127
7128
7129/**
7130 * Updates the IEMCPU::cbWritten counter if applicable.
7131 *
7132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7133 * @param fAccess The access being accounted for.
7134 * @param cbMem The access size.
7135 */
7136DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7137{
7138 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7139 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7140 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7141}
7142
7143
7144/**
7145 * Checks if the given segment can be written to, raising the appropriate
7146 * exception if not.
7147 *
7148 * @returns VBox strict status code.
7149 *
7150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7151 * @param pHid Pointer to the hidden register.
7152 * @param iSegReg The register number.
7153 * @param pu64BaseAddr Where to return the base address to use for the
7154 * segment. (In 64-bit code it may differ from the
7155 * base in the hidden segment.)
7156 */
7157IEM_STATIC VBOXSTRICTRC
7158iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7159{
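/* In 64-bit mode segmentation is mostly disabled; only FS and GS contribute a non-zero base. */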
7160 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7161 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7162 else
7163 {
7164 if (!pHid->Attr.n.u1Present)
7165 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7166
7167 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7168 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7169 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7170 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7171 *pu64BaseAddr = pHid->u64Base;
7172 }
7173 return VINF_SUCCESS;
7174}
7175
7176
7177/**
7178 * Checks if the given segment can be read from, raising the appropriate
7179 * exception if not.
7180 *
7181 * @returns VBox strict status code.
7182 *
7183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7184 * @param pHid Pointer to the hidden register.
7185 * @param iSegReg The register number.
7186 * @param pu64BaseAddr Where to return the base address to use for the
7187 * segment. (In 64-bit code it may differ from the
7188 * base in the hidden segment.)
7189 */
7190IEM_STATIC VBOXSTRICTRC
7191iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7192{
7193 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7194 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7195 else
7196 {
7197 if (!pHid->Attr.n.u1Present)
7198 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7199
7200 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7201 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7202 *pu64BaseAddr = pHid->u64Base;
7203 }
7204 return VINF_SUCCESS;
7205}
7206
7207
7208/**
7209 * Applies the segment limit, base and attributes.
7210 *
7211 * This may raise a \#GP or \#SS.
7212 *
7213 * @returns VBox strict status code.
7214 *
7215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7216 * @param fAccess The kind of access which is being performed.
7217 * @param iSegReg The index of the segment register to apply.
7218 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7219 * TSS, ++).
7220 * @param cbMem The access size.
7221 * @param pGCPtrMem Pointer to the guest memory address to apply
7222 * segmentation to. Input and output parameter.
7223 */
7224IEM_STATIC VBOXSTRICTRC
7225iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7226{
7227 if (iSegReg == UINT8_MAX)
7228 return VINF_SUCCESS;
7229
7230 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7231 switch (pVCpu->iem.s.enmCpuMode)
7232 {
7233 case IEMMODE_16BIT:
7234 case IEMMODE_32BIT:
7235 {
7236 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7237 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7238
7239 if ( pSel->Attr.n.u1Present
7240 && !pSel->Attr.n.u1Unusable)
7241 {
7242 Assert(pSel->Attr.n.u1DescType);
7243 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7244 {
7245 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7246 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7247 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7248
7249 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7250 {
7251 /** @todo CPL check. */
7252 }
7253
7254 /*
7255 * There are two kinds of data selectors, normal and expand down.
7256 */
7257 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7258 {
7259 if ( GCPtrFirst32 > pSel->u32Limit
7260 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7261 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7262 }
7263 else
7264 {
7265 /*
7266 * The upper boundary is defined by the B bit, not the G bit!
7267 */
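/* For an expand-down data segment the valid offset range is (limit, B ? 0xffffffff : 0xffff]. */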
7268 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7269 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7270 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7271 }
7272 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7273 }
7274 else
7275 {
7276
7277 /*
7278 * Code selectors can usually be used to read through; writing is
7279 * only permitted in real and V8086 mode.
7280 */
7281 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7282 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7283 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7284 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7285 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7286
7287 if ( GCPtrFirst32 > pSel->u32Limit
7288 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7289 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7290
7291 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7292 {
7293 /** @todo CPL check. */
7294 }
7295
7296 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7297 }
7298 }
7299 else
7300 return iemRaiseGeneralProtectionFault0(pVCpu);
7301 return VINF_SUCCESS;
7302 }
7303
7304 case IEMMODE_64BIT:
7305 {
7306 RTGCPTR GCPtrMem = *pGCPtrMem;
7307 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7308 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7309
7310 Assert(cbMem >= 1);
7311 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7312 return VINF_SUCCESS;
7313 return iemRaiseGeneralProtectionFault0(pVCpu);
7314 }
7315
7316 default:
7317 AssertFailedReturn(VERR_IEM_IPE_7);
7318 }
7319}
7320
7321
7322/**
7323 * Translates a virtual address to a physical address and checks if we
7324 * can access the page as specified.
7325 *
7326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7327 * @param GCPtrMem The virtual address.
7328 * @param fAccess The intended access.
7329 * @param pGCPhysMem Where to return the physical address.
7330 */
7331IEM_STATIC VBOXSTRICTRC
7332iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7333{
7334 /** @todo Need a different PGM interface here. We're currently using
7335 * generic / REM interfaces. this won't cut it for R0 & RC. */
7336 RTGCPHYS GCPhys;
7337 uint64_t fFlags;
7338 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7339 if (RT_FAILURE(rc))
7340 {
7341 /** @todo Check unassigned memory in unpaged mode. */
7342 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7343 *pGCPhysMem = NIL_RTGCPHYS;
7344 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7345 }
7346
7347 /* If the page is writable, user-accessible and does not have the no-exec bit
7348 set, all access is allowed. Otherwise we'll have to check more carefully... */
7349 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7350 {
7351 /* Write to read only memory? */
7352 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7353 && !(fFlags & X86_PTE_RW)
7354 && ( pVCpu->iem.s.uCpl != 0
7355 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7356 {
7357 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7358 *pGCPhysMem = NIL_RTGCPHYS;
7359 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7360 }
7361
7362 /* Kernel memory accessed by userland? */
7363 if ( !(fFlags & X86_PTE_US)
7364 && pVCpu->iem.s.uCpl == 3
7365 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7366 {
7367 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7368 *pGCPhysMem = NIL_RTGCPHYS;
7369 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7370 }
7371
7372 /* Executing non-executable memory? */
7373 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7374 && (fFlags & X86_PTE_PAE_NX)
7375 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7376 {
7377 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7378 *pGCPhysMem = NIL_RTGCPHYS;
7379 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7380 VERR_ACCESS_DENIED);
7381 }
7382 }
7383
7384 /*
7385 * Set the dirty / access flags.
7386 * ASSUMES this is set when the address is translated rather than on commit...
7387 */
7388 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7389 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7390 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7391 {
7392 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7393 AssertRC(rc2);
7394 }
7395
7396 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7397 *pGCPhysMem = GCPhys;
7398 return VINF_SUCCESS;
7399}
7400
7401
7402
7403/**
7404 * Maps a physical page.
7405 *
7406 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 * @param GCPhysMem The physical address.
7409 * @param fAccess The intended access.
7410 * @param ppvMem Where to return the mapping address.
7411 * @param pLock The PGM lock.
7412 */
7413IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7414{
7415#ifdef IEM_VERIFICATION_MODE_FULL
7416 /* Force the alternative path so we can ignore writes. */
7417 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7418 {
7419 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7420 {
7421 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7422 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7423 if (RT_FAILURE(rc2))
7424 pVCpu->iem.s.fProblematicMemory = true;
7425 }
7426 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7427 }
7428#endif
7429#ifdef IEM_LOG_MEMORY_WRITES
7430 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7431 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7432#endif
7433#ifdef IEM_VERIFICATION_MODE_MINIMAL
7434 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7435#endif
7436
7437 /** @todo This API may require some improvement later. A private deal with PGM
7438 * regarding locking and unlocking needs to be struck. A couple of TLBs
7439 * living in PGM, but with publicly accessible inlined access methods
7440 * could perhaps be an even better solution. */
7441 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7442 GCPhysMem,
7443 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7444 pVCpu->iem.s.fBypassHandlers,
7445 ppvMem,
7446 pLock);
7447 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7448 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7449
7450#ifdef IEM_VERIFICATION_MODE_FULL
7451 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7452 pVCpu->iem.s.fProblematicMemory = true;
7453#endif
7454 return rc;
7455}
7456
7457
7458/**
7459 * Unmap a page previously mapped by iemMemPageMap.
7460 *
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 * @param GCPhysMem The physical address.
7463 * @param fAccess The intended access.
7464 * @param pvMem What iemMemPageMap returned.
7465 * @param pLock The PGM lock.
7466 */
7467DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7468{
7469 NOREF(pVCpu);
7470 NOREF(GCPhysMem);
7471 NOREF(fAccess);
7472 NOREF(pvMem);
7473 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7474}
7475
7476
7477/**
7478 * Looks up a memory mapping entry.
7479 *
7480 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7482 * @param pvMem The memory address.
7483 * @param fAccess The access flags to match.
7484 */
7485DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7486{
7487 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7488 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7489 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7490 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7491 return 0;
7492 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7493 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7494 return 1;
7495 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7496 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7497 return 2;
7498 return VERR_NOT_FOUND;
7499}
7500
7501
7502/**
7503 * Finds a free memmap entry when using iNextMapping doesn't work.
7504 *
7505 * @returns Memory mapping index, 1024 on failure.
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 */
7508IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7509{
7510 /*
7511 * The easy case.
7512 */
7513 if (pVCpu->iem.s.cActiveMappings == 0)
7514 {
7515 pVCpu->iem.s.iNextMapping = 1;
7516 return 0;
7517 }
7518
7519 /* There should be enough mappings for all instructions. */
7520 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7521
7522 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7523 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7524 return i;
7525
7526 AssertFailedReturn(1024);
7527}
7528
7529
7530/**
7531 * Commits a bounce buffer that needs writing back and unmaps it.
7532 *
7533 * @returns Strict VBox status code.
7534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7535 * @param iMemMap The index of the buffer to commit.
7536 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7537 * Always false in ring-3, obviously.
7538 */
7539IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7540{
7541 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7542 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7543#ifdef IN_RING3
7544 Assert(!fPostponeFail);
7545#endif
7546
7547 /*
7548 * Do the writing.
7549 */
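/* A bounce buffer covers at most two physical pages: the first cbFirst bytes go to
   GCPhysFirst and the remaining cbSecond bytes (zero unless the access crossed a
   page boundary) go to GCPhysSecond. */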
7550#ifndef IEM_VERIFICATION_MODE_MINIMAL
7551 PVM pVM = pVCpu->CTX_SUFF(pVM);
7552 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7553 && !IEM_VERIFICATION_ENABLED(pVCpu))
7554 {
7555 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7556 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7557 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7558 if (!pVCpu->iem.s.fBypassHandlers)
7559 {
7560 /*
7561 * Carefully and efficiently dealing with access handler return
7562 * codes makes this a little bloated.
7563 */
7564 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7566 pbBuf,
7567 cbFirst,
7568 PGMACCESSORIGIN_IEM);
7569 if (rcStrict == VINF_SUCCESS)
7570 {
7571 if (cbSecond)
7572 {
7573 rcStrict = PGMPhysWrite(pVM,
7574 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7575 pbBuf + cbFirst,
7576 cbSecond,
7577 PGMACCESSORIGIN_IEM);
7578 if (rcStrict == VINF_SUCCESS)
7579 { /* nothing */ }
7580 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7581 {
7582 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7583 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7584 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7585 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7586 }
7587# ifndef IN_RING3
7588 else if (fPostponeFail)
7589 {
7590 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7591 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7592 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7593 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7594 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7595 return iemSetPassUpStatus(pVCpu, rcStrict);
7596 }
7597# endif
7598 else
7599 {
7600 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7601 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7602 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7603 return rcStrict;
7604 }
7605 }
7606 }
7607 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7608 {
7609 if (!cbSecond)
7610 {
7611 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7612 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7613 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7614 }
7615 else
7616 {
7617 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7618 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7619 pbBuf + cbFirst,
7620 cbSecond,
7621 PGMACCESSORIGIN_IEM);
7622 if (rcStrict2 == VINF_SUCCESS)
7623 {
7624 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7625 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7626 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7627 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7628 }
7629 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7630 {
7631 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7632 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7633 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7634 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7635 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7636 }
7637# ifndef IN_RING3
7638 else if (fPostponeFail)
7639 {
7640 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7641 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7642 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7643 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7644 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7645 return iemSetPassUpStatus(pVCpu, rcStrict);
7646 }
7647# endif
7648 else
7649 {
7650 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7651 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7653 return rcStrict2;
7654 }
7655 }
7656 }
7657# ifndef IN_RING3
7658 else if (fPostponeFail)
7659 {
7660 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7661 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7662 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7663 if (!cbSecond)
7664 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7665 else
7666 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7667 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7668 return iemSetPassUpStatus(pVCpu, rcStrict);
7669 }
7670# endif
7671 else
7672 {
7673 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7674 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7675 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7676 return rcStrict;
7677 }
7678 }
7679 else
7680 {
7681 /*
7682 * No access handlers, much simpler.
7683 */
7684 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7685 if (RT_SUCCESS(rc))
7686 {
7687 if (cbSecond)
7688 {
7689 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7690 if (RT_SUCCESS(rc))
7691 { /* likely */ }
7692 else
7693 {
7694 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7695 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7696 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7697 return rc;
7698 }
7699 }
7700 }
7701 else
7702 {
7703 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7704 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7705 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7706 return rc;
7707 }
7708 }
7709 }
7710#endif
7711
7712#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7713 /*
7714 * Record the write(s).
7715 */
7716 if (!pVCpu->iem.s.fNoRem)
7717 {
7718 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7719 if (pEvtRec)
7720 {
7721 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7722 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7723 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7724 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7725 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7726 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7727 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7728 }
7729 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7730 {
7731 pEvtRec = iemVerifyAllocRecord(pVCpu);
7732 if (pEvtRec)
7733 {
7734 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7735 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7736 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7737 memcpy(pEvtRec->u.RamWrite.ab,
7738 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7739 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7740 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7741 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7742 }
7743 }
7744 }
7745#endif
7746#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7747 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7748 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7749 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7750 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7751 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7752 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7753
7754 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7755 g_cbIemWrote = cbWrote;
7756 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
7757#endif
7758
7759 /*
7760 * Free the mapping entry.
7761 */
7762 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7763 Assert(pVCpu->iem.s.cActiveMappings != 0);
7764 pVCpu->iem.s.cActiveMappings--;
7765 return VINF_SUCCESS;
7766}
7767
7768
7769/**
7770 * iemMemMap worker that deals with a request crossing pages.
7771 */
7772IEM_STATIC VBOXSTRICTRC
7773iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
7774{
7775 /*
7776 * Do the address translations.
7777 */
7778 RTGCPHYS GCPhysFirst;
7779 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
7780 if (rcStrict != VINF_SUCCESS)
7781 return rcStrict;
7782
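/* Translate the address of the last byte of the access to find the second page;
   the page offset is masked off just below. */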
7783 RTGCPHYS GCPhysSecond;
7784 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
7785 fAccess, &GCPhysSecond);
7786 if (rcStrict != VINF_SUCCESS)
7787 return rcStrict;
7788 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
7789
7790 PVM pVM = pVCpu->CTX_SUFF(pVM);
7791#ifdef IEM_VERIFICATION_MODE_FULL
7792 /*
7793 * Detect problematic memory when verifying so we can select
7794 * the right execution engine. (TLB: Redo this.)
7795 */
7796 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7797 {
7798 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7799 if (RT_SUCCESS(rc2))
7800 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7801 if (RT_FAILURE(rc2))
7802 pVCpu->iem.s.fProblematicMemory = true;
7803 }
7804#endif
7805
7806
7807 /*
7808 * Read in the current memory content if it's a read, execute or partial
7809 * write access.
7810 */
7811 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7812 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
7813 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
7814
7815 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7816 {
7817 if (!pVCpu->iem.s.fBypassHandlers)
7818 {
7819 /*
7820 * Must carefully deal with access handler status codes here,
7821 * makes the code a bit bloated.
7822 */
7823 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
7824 if (rcStrict == VINF_SUCCESS)
7825 {
7826 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7827 if (rcStrict == VINF_SUCCESS)
7828 { /*likely */ }
7829 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7830 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7831 else
7832 {
7833 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
7834 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7835 return rcStrict;
7836 }
7837 }
7838 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7839 {
7840 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7841 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7842 {
7843 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7844 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7845 }
7846 else
7847 {
7848 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
7849 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
7850 return rcStrict2;
7851 }
7852 }
7853 else
7854 {
7855 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7856 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7857 return rcStrict;
7858 }
7859 }
7860 else
7861 {
7862 /*
7863 * No informational status codes here, much more straightforward.
7864 */
7865 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
7866 if (RT_SUCCESS(rc))
7867 {
7868 Assert(rc == VINF_SUCCESS);
7869 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
7870 if (RT_SUCCESS(rc))
7871 Assert(rc == VINF_SUCCESS);
7872 else
7873 {
7874 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
7875 return rc;
7876 }
7877 }
7878 else
7879 {
7880 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
7881 return rc;
7882 }
7883 }
7884
7885#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7886 if ( !pVCpu->iem.s.fNoRem
7887 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
7888 {
7889 /*
7890 * Record the reads.
7891 */
7892 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7893 if (pEvtRec)
7894 {
7895 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7896 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
7897 pEvtRec->u.RamRead.cb = cbFirstPage;
7898 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7899 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7900 }
7901 pEvtRec = iemVerifyAllocRecord(pVCpu);
7902 if (pEvtRec)
7903 {
7904 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7905 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
7906 pEvtRec->u.RamRead.cb = cbSecondPage;
7907 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7908 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7909 }
7910 }
7911#endif
7912 }
7913#ifdef VBOX_STRICT
7914 else
7915 memset(pbBuf, 0xcc, cbMem);
7916 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
7917 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
7918#endif
7919
7920 /*
7921 * Commit the bounce buffer entry.
7922 */
7923 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
7924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
7925 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
7926 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
7927 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
7928 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
7929 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
7930 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7931 pVCpu->iem.s.cActiveMappings++;
7932
7933 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7934 *ppvMem = pbBuf;
7935 return VINF_SUCCESS;
7936}
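
/*
 * Example (sketch, not part of the build): how the cross-page split above
 * divides an access at a 4 KiB page boundary.  The address and size are made
 * up for illustration; 0x1000/0xfff stand in for PAGE_SIZE/PAGE_OFFSET_MASK.
 *
 *     RTGCPHYS const GCPhysFirst  = UINT64_C(0x00200ffe);                      // hypothetical, 2 bytes before the boundary
 *     size_t   const cbMem        = 4;                                         // a dword straddling two pages
 *     uint32_t const cbFirstPage  = 0x1000 - (uint32_t)(GCPhysFirst & 0xfff);  // = 2 bytes read from the first page
 *     uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);           // = 2 bytes read from the second page
 */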
7937
7938
7939/**
7940 * iemMemMap worker that deals with iemMemPageMap failures.
7941 */
7942IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
7943 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
7944{
7945 /*
7946 * Filter out conditions we can handle and the ones which shouldn't happen.
7947 */
7948 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
7949 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
7950 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
7951 {
7952 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
7953 return rcMap;
7954 }
7955 pVCpu->iem.s.cPotentialExits++;
7956
7957 /*
7958 * Read in the current memory content if it's a read, execute or partial
7959 * write access.
7960 */
7961 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7962 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7963 {
7964 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
7965 memset(pbBuf, 0xff, cbMem);
7966 else
7967 {
7968 int rc;
7969 if (!pVCpu->iem.s.fBypassHandlers)
7970 {
7971 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
7972 if (rcStrict == VINF_SUCCESS)
7973 { /* nothing */ }
7974 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7975 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7976 else
7977 {
7978 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7979 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7980 return rcStrict;
7981 }
7982 }
7983 else
7984 {
7985 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
7986 if (RT_SUCCESS(rc))
7987 { /* likely */ }
7988 else
7989 {
7990 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
7991 GCPhysFirst, rc));
7992 return rc;
7993 }
7994 }
7995 }
7996
7997#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7998 if ( !pVCpu->iem.s.fNoRem
7999 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8000 {
8001 /*
8002 * Record the read.
8003 */
8004 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8005 if (pEvtRec)
8006 {
8007 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8008 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8009 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8010 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8011 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8012 }
8013 }
8014#endif
8015 }
8016#ifdef VBOX_STRICT
8017 else
8018 memset(pbBuf, 0xcc, cbMem);
8019#endif
8020#ifdef VBOX_STRICT
8021 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8022 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8023#endif
8024
8025 /*
8026 * Commit the bounce buffer entry.
8027 */
8028 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8029 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8030 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8031 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8032 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8033 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8034 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8035 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8036 pVCpu->iem.s.cActiveMappings++;
8037
8038 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8039 *ppvMem = pbBuf;
8040 return VINF_SUCCESS;
8041}
8042
8043
8044
8045/**
8046 * Maps the specified guest memory for the given kind of access.
8047 *
8048 * This may use bounce buffering of the memory if it's crossing a page
8049 * boundary or if there is an access handler installed for any of it. Because
8050 * of lock prefix guarantees, we're in for some extra clutter when this
8051 * happens.
8052 *
8053 * This may raise a \#GP, \#SS, \#PF or \#AC.
8054 *
8055 * @returns VBox strict status code.
8056 *
8057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8058 * @param ppvMem Where to return the pointer to the mapped
8059 * memory.
8060 * @param cbMem The number of bytes to map. This is usually 1,
8061 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8062 * string operations it can be up to a page.
8063 * @param iSegReg The index of the segment register to use for
8064 * this access. The base and limits are checked.
8065 * Use UINT8_MAX to indicate that no segmentation
8066 * is required (for IDT, GDT and LDT accesses).
8067 * @param GCPtrMem The address of the guest memory.
8068 * @param fAccess How the memory is being accessed. The
8069 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8070 * how to map the memory, while the
8071 * IEM_ACCESS_WHAT_XXX bit is used when raising
8072 * exceptions.
8073 */
8074IEM_STATIC VBOXSTRICTRC
8075iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8076{
8077 /*
8078 * Check the input and figure out which mapping entry to use.
8079 */
8080 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8081 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8082 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8083
8084 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8085 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8086 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8087 {
8088 iMemMap = iemMemMapFindFree(pVCpu);
8089 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8090 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8091 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8092 pVCpu->iem.s.aMemMappings[2].fAccess),
8093 VERR_IEM_IPE_9);
8094 }
8095
8096 /*
8097 * Map the memory, checking that we can actually access it. If something
8098 * slightly complicated happens, fall back on bounce buffering.
8099 */
8100 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8101 if (rcStrict != VINF_SUCCESS)
8102 return rcStrict;
8103
8104 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8105 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8106
8107 RTGCPHYS GCPhysFirst;
8108 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8109 if (rcStrict != VINF_SUCCESS)
8110 return rcStrict;
8111
8112 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8113 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8114 if (fAccess & IEM_ACCESS_TYPE_READ)
8115 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8116
8117 void *pvMem;
8118 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8119 if (rcStrict != VINF_SUCCESS)
8120 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8121
8122 /*
8123 * Fill in the mapping table entry.
8124 */
8125 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8126 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8127 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8128 pVCpu->iem.s.cActiveMappings++;
8129
8130 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8131 *ppvMem = pvMem;
8132 return VINF_SUCCESS;
8133}
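
/*
 * Example (sketch, not part of the build): the map/commit pattern built on
 * iemMemMap, matching what the iemMemFetchData* helpers further down do.  The
 * segment register and the address variable are assumptions for the sake of
 * the example.  Note that read-only mappings must be committed and unmapped
 * too, so page locks and bounce buffers get released.
 *
 *     uint16_t        u16Value = 0;
 *     uint16_t const *pu16Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         u16Value = *pu16Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *     }
 */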
8134
8135
8136/**
8137 * Commits the guest memory if bounce buffered and unmaps it.
8138 *
8139 * @returns Strict VBox status code.
8140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8141 * @param pvMem The mapping.
8142 * @param fAccess The kind of access.
8143 */
8144IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8145{
8146 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8147 AssertReturn(iMemMap >= 0, iMemMap);
8148
8149 /* If it's bounce buffered, we may need to write back the buffer. */
8150 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8151 {
8152 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8153 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8154 }
8155 /* Otherwise unlock it. */
8156 else
8157 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8158
8159 /* Free the entry. */
8160 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8161 Assert(pVCpu->iem.s.cActiveMappings != 0);
8162 pVCpu->iem.s.cActiveMappings--;
8163 return VINF_SUCCESS;
8164}
8165
8166#ifdef IEM_WITH_SETJMP
8167
8168/**
8169 * Maps the specified guest memory for the given kind of access, longjmp on
8170 * error.
8171 *
8172 * This may use bounce buffering of the memory if it's crossing a page
8173 * boundary or if there is an access handler installed for any of it. Because
8174 * of lock prefix guarantees, we're in for some extra clutter when this
8175 * happens.
8176 *
8177 * This may raise a \#GP, \#SS, \#PF or \#AC.
8178 *
8179 * @returns Pointer to the mapped memory.
8180 *
8181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8182 * @param cbMem The number of bytes to map. This is usually 1,
8183 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8184 * string operations it can be up to a page.
8185 * @param iSegReg The index of the segment register to use for
8186 * this access. The base and limits are checked.
8187 * Use UINT8_MAX to indicate that no segmentation
8188 * is required (for IDT, GDT and LDT accesses).
8189 * @param GCPtrMem The address of the guest memory.
8190 * @param fAccess How the memory is being accessed. The
8191 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8192 * how to map the memory, while the
8193 * IEM_ACCESS_WHAT_XXX bit is used when raising
8194 * exceptions.
8195 */
8196IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8197{
8198 /*
8199 * Check the input and figure out which mapping entry to use.
8200 */
8201 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8202 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8203 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8204
8205 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8206 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8207 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8208 {
8209 iMemMap = iemMemMapFindFree(pVCpu);
8210 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8211 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8212 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8213 pVCpu->iem.s.aMemMappings[2].fAccess),
8214 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8215 }
8216
8217 /*
8218 * Map the memory, checking that we can actually access it. If something
8219 * slightly complicated happens, fall back on bounce buffering.
8220 */
8221 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8222 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8223 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8224
8225 /* Crossing a page boundary? */
8226 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8227 { /* No (likely). */ }
8228 else
8229 {
8230 void *pvMem;
8231 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8232 if (rcStrict == VINF_SUCCESS)
8233 return pvMem;
8234 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8235 }
8236
8237 RTGCPHYS GCPhysFirst;
8238 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8239 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8240 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8241
8242 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8243 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8244 if (fAccess & IEM_ACCESS_TYPE_READ)
8245 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8246
8247 void *pvMem;
8248 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8249 if (rcStrict == VINF_SUCCESS)
8250 { /* likely */ }
8251 else
8252 {
8253 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8254 if (rcStrict == VINF_SUCCESS)
8255 return pvMem;
8256 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8257 }
8258
8259 /*
8260 * Fill in the mapping table entry.
8261 */
8262 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8263 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8264 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8265 pVCpu->iem.s.cActiveMappings++;
8266
8267 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8268 return pvMem;
8269}
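
/*
 * Example (sketch, not part of the build): one way a caller could bracket the
 * *Jmp helpers with a local jump buffer.  Only pJmpBuf, iemMemMapJmp and
 * iemMemCommitAndUnmapJmp are taken from the code above; the save/restore
 * dance and the chosen access are assumptions, IEM's real setup lives in the
 * execution loops elsewhere in this file.
 *
 *     jmp_buf  JmpBuf;
 *     jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
 *     pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
 *     int rc = setjmp(JmpBuf);
 *     if (rc == 0)
 *     {
 *         uint8_t *pbDst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(uint8_t), X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *         *pbDst = 0x90;
 *         iemMemCommitAndUnmapJmp(pVCpu, pbDst, IEM_ACCESS_DATA_W);
 *     }
 *     else
 *     {
 *         // rc is the VBox status code the helper passed to longjmp.
 *     }
 *     pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
 */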
8270
8271
8272/**
8273 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8274 *
8275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8276 * @param pvMem The mapping.
8277 * @param fAccess The kind of access.
8278 */
8279IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8280{
8281 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8282 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8283
8284 /* If it's bounce buffered, we may need to write back the buffer. */
8285 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8286 {
8287 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8288 {
8289 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8290 if (rcStrict == VINF_SUCCESS)
8291 return;
8292 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8293 }
8294 }
8295 /* Otherwise unlock it. */
8296 else
8297 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8298
8299 /* Free the entry. */
8300 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8301 Assert(pVCpu->iem.s.cActiveMappings != 0);
8302 pVCpu->iem.s.cActiveMappings--;
8303}
8304
8305#endif
8306
8307#ifndef IN_RING3
8308/**
8309 * Commits the guest memory if bounce buffered and unmaps it. If any bounce
8310 * buffer part shows trouble, the write-back is postponed to ring-3 (sets FF and stuff).
8311 *
8312 * Allows the instruction to be completed and retired, while the IEM user will
8313 * return to ring-3 immediately afterwards and do the postponed writes there.
8314 *
8315 * @returns VBox status code (no strict statuses). Caller must check
8316 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8318 * @param pvMem The mapping.
8319 * @param fAccess The kind of access.
8320 */
8321IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8322{
8323 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8324 AssertReturn(iMemMap >= 0, iMemMap);
8325
8326 /* If it's bounce buffered, we may need to write back the buffer. */
8327 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8328 {
8329 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8330 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8331 }
8332 /* Otherwise unlock it. */
8333 else
8334 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8335
8336 /* Free the entry. */
8337 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8338 Assert(pVCpu->iem.s.cActiveMappings != 0);
8339 pVCpu->iem.s.cActiveMappings--;
8340 return VINF_SUCCESS;
8341}
8342#endif
8343
8344
8345/**
8346 * Rolls back mappings, releasing page locks and such.
8347 *
8348 * The caller shall only call this after checking cActiveMappings.
8349 *
8351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8352 */
8353IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8354{
8355 Assert(pVCpu->iem.s.cActiveMappings > 0);
8356
8357 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8358 while (iMemMap-- > 0)
8359 {
8360 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8361 if (fAccess != IEM_ACCESS_INVALID)
8362 {
8363 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8364 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8365 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8366 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8367 Assert(pVCpu->iem.s.cActiveMappings > 0);
8368 pVCpu->iem.s.cActiveMappings--;
8369 }
8370 }
8371}
8372
8373
8374/**
8375 * Fetches a data byte.
8376 *
8377 * @returns Strict VBox status code.
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param pu8Dst Where to return the byte.
8380 * @param iSegReg The index of the segment register to use for
8381 * this access. The base and limits are checked.
8382 * @param GCPtrMem The address of the guest memory.
8383 */
8384IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8385{
8386 /* The lazy approach for now... */
8387 uint8_t const *pu8Src;
8388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8389 if (rc == VINF_SUCCESS)
8390 {
8391 *pu8Dst = *pu8Src;
8392 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8393 }
8394 return rc;
8395}
8396
8397
8398#ifdef IEM_WITH_SETJMP
8399/**
8400 * Fetches a data byte, longjmp on error.
8401 *
8402 * @returns The byte.
8403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8404 * @param iSegReg The index of the segment register to use for
8405 * this access. The base and limits are checked.
8406 * @param GCPtrMem The address of the guest memory.
8407 */
8408DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8409{
8410 /* The lazy approach for now... */
8411 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8412 uint8_t const bRet = *pu8Src;
8413 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8414 return bRet;
8415}
8416#endif /* IEM_WITH_SETJMP */
8417
8418
8419/**
8420 * Fetches a data word.
8421 *
8422 * @returns Strict VBox status code.
8423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8424 * @param pu16Dst Where to return the word.
8425 * @param iSegReg The index of the segment register to use for
8426 * this access. The base and limits are checked.
8427 * @param GCPtrMem The address of the guest memory.
8428 */
8429IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8430{
8431 /* The lazy approach for now... */
8432 uint16_t const *pu16Src;
8433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8434 if (rc == VINF_SUCCESS)
8435 {
8436 *pu16Dst = *pu16Src;
8437 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8438 }
8439 return rc;
8440}
8441
8442
8443#ifdef IEM_WITH_SETJMP
8444/**
8445 * Fetches a data word, longjmp on error.
8446 *
8447 * @returns The word.
8448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8449 * @param iSegReg The index of the segment register to use for
8450 * this access. The base and limits are checked.
8451 * @param GCPtrMem The address of the guest memory.
8452 */
8453DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8454{
8455 /* The lazy approach for now... */
8456 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8457 uint16_t const u16Ret = *pu16Src;
8458 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8459 return u16Ret;
8460}
8461#endif
8462
8463
8464/**
8465 * Fetches a data dword.
8466 *
8467 * @returns Strict VBox status code.
8468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8469 * @param pu32Dst Where to return the dword.
8470 * @param iSegReg The index of the segment register to use for
8471 * this access. The base and limits are checked.
8472 * @param GCPtrMem The address of the guest memory.
8473 */
8474IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8475{
8476 /* The lazy approach for now... */
8477 uint32_t const *pu32Src;
8478 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8479 if (rc == VINF_SUCCESS)
8480 {
8481 *pu32Dst = *pu32Src;
8482 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8483 }
8484 return rc;
8485}
8486
8487
8488#ifdef IEM_WITH_SETJMP
8489
8490IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8491{
8492 Assert(cbMem >= 1);
8493 Assert(iSegReg < X86_SREG_COUNT);
8494
8495 /*
8496 * 64-bit mode is simpler.
8497 */
8498 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8499 {
8500 if (iSegReg >= X86_SREG_FS)
8501 {
8502 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8503 GCPtrMem += pSel->u64Base;
8504 }
8505
8506 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8507 return GCPtrMem;
8508 }
8509 /*
8510 * 16-bit and 32-bit segmentation.
8511 */
8512 else
8513 {
8514 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8515 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8516 == X86DESCATTR_P /* data, expand up */
8517 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8518 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8519 {
8520 /* expand up */
8521 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
8522 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8523 && GCPtrLast32 >= (uint32_t)GCPtrMem)) /* no 32-bit wrap-around */
8524 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8525 }
8526 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8527 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8528 {
8529 /* expand down */
8530 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8531 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8532 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8533 && GCPtrLast32 > (uint32_t)GCPtrMem))
8534 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8535 }
8536 else
8537 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8538 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8539 }
8540 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8541}
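
/*
 * Worked example (sketch) for the expand-up limit check above, with made-up
 * numbers: u32Limit = 0xffff, u64Base = 0x10000 (a 64 KiB data segment).
 *
 *     // GCPtrMem = 0xfffc, cbMem = 4 -> last byte = 0xffff  <= limit -> returns 0x10000 + 0xfffc = 0x1fffc
 *     // GCPtrMem = 0xfffd, cbMem = 4 -> last byte = 0x10000 >  limit -> falls through to iemRaiseSelectorBoundsJmp
 */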
8542
8543
8544IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8545{
8546 Assert(cbMem >= 1);
8547 Assert(iSegReg < X86_SREG_COUNT);
8548
8549 /*
8550 * 64-bit mode is simpler.
8551 */
8552 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8553 {
8554 if (iSegReg >= X86_SREG_FS)
8555 {
8556 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8557 GCPtrMem += pSel->u64Base;
8558 }
8559
8560 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8561 return GCPtrMem;
8562 }
8563 /*
8564 * 16-bit and 32-bit segmentation.
8565 */
8566 else
8567 {
8568 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8569 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8570 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8571 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8572 {
8573 /* expand up */
8574 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
8575 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8576 && GCPtrLast32 >= (uint32_t)GCPtrMem)) /* no 32-bit wrap-around */
8577 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8578 }
8579 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8580 {
8581 /* expand down */
8582 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8583 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8584 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8585 && GCPtrLast32 > (uint32_t)GCPtrMem))
8586 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8587 }
8588 else
8589 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8590 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8591 }
8592 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8593}
8594
8595
8596/**
8597 * Fetches a data dword, longjmp on error, fallback/safe version.
8598 *
8599 * @returns The dword.
8600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8601 * @param iSegReg The index of the segment register to use for
8602 * this access. The base and limits are checked.
8603 * @param GCPtrMem The address of the guest memory.
8604 */
8605IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8606{
8607 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8608 uint32_t const u32Ret = *pu32Src;
8609 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8610 return u32Ret;
8611}
8612
8613
8614/**
8615 * Fetches a data dword, longjmp on error.
8616 *
8617 * @returns The dword.
8618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8619 * @param iSegReg The index of the segment register to use for
8620 * this access. The base and limits are checked.
8621 * @param GCPtrMem The address of the guest memory.
8622 */
8623DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8624{
8625# ifdef IEM_WITH_DATA_TLB
8626 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8627 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8628 {
8629 /// @todo more later.
8630 }
8631
8632 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8633# else
8634 /* The lazy approach. */
8635 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8636 uint32_t const u32Ret = *pu32Src;
8637 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8638 return u32Ret;
8639# endif
8640}
8641#endif
8642
8643
8644#ifdef SOME_UNUSED_FUNCTION
8645/**
8646 * Fetches a data dword and sign extends it to a qword.
8647 *
8648 * @returns Strict VBox status code.
8649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8650 * @param pu64Dst Where to return the sign extended value.
8651 * @param iSegReg The index of the segment register to use for
8652 * this access. The base and limits are checked.
8653 * @param GCPtrMem The address of the guest memory.
8654 */
8655IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8656{
8657 /* The lazy approach for now... */
8658 int32_t const *pi32Src;
8659 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8660 if (rc == VINF_SUCCESS)
8661 {
8662 *pu64Dst = *pi32Src;
8663 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8664 }
8665#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8666 else
8667 *pu64Dst = 0;
8668#endif
8669 return rc;
8670}
8671#endif
8672
8673
8674/**
8675 * Fetches a data qword.
8676 *
8677 * @returns Strict VBox status code.
8678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8679 * @param pu64Dst Where to return the qword.
8680 * @param iSegReg The index of the segment register to use for
8681 * this access. The base and limits are checked.
8682 * @param GCPtrMem The address of the guest memory.
8683 */
8684IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8685{
8686 /* The lazy approach for now... */
8687 uint64_t const *pu64Src;
8688 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8689 if (rc == VINF_SUCCESS)
8690 {
8691 *pu64Dst = *pu64Src;
8692 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8693 }
8694 return rc;
8695}
8696
8697
8698#ifdef IEM_WITH_SETJMP
8699/**
8700 * Fetches a data qword, longjmp on error.
8701 *
8702 * @returns The qword.
8703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8704 * @param iSegReg The index of the segment register to use for
8705 * this access. The base and limits are checked.
8706 * @param GCPtrMem The address of the guest memory.
8707 */
8708DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8709{
8710 /* The lazy approach for now... */
8711 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8712 uint64_t const u64Ret = *pu64Src;
8713 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8714 return u64Ret;
8715}
8716#endif
8717
8718
8719/**
8720 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8721 *
8722 * @returns Strict VBox status code.
8723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8724 * @param pu64Dst Where to return the qword.
8725 * @param iSegReg The index of the segment register to use for
8726 * this access. The base and limits are checked.
8727 * @param GCPtrMem The address of the guest memory.
8728 */
8729IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8730{
8731 /* The lazy approach for now... */
8732 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8733 if (RT_UNLIKELY(GCPtrMem & 15))
8734 return iemRaiseGeneralProtectionFault0(pVCpu);
8735
8736 uint64_t const *pu64Src;
8737 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8738 if (rc == VINF_SUCCESS)
8739 {
8740 *pu64Dst = *pu64Src;
8741 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8742 }
8743 return rc;
8744}
8745
8746
8747#ifdef IEM_WITH_SETJMP
8748/**
8749 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8750 *
8751 * @returns The qword.
8752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8753 * @param iSegReg The index of the segment register to use for
8754 * this access. The base and limits are checked.
8755 * @param GCPtrMem The address of the guest memory.
8756 */
8757DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8758{
8759 /* The lazy approach for now... */
8760 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8761 if (RT_LIKELY(!(GCPtrMem & 15)))
8762 {
8763 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8764 uint64_t const u64Ret = *pu64Src;
8765 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8766 return u64Ret;
8767 }
8768
8769 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
8770 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
8771}
8772#endif
8773
8774
8775/**
8776 * Fetches a data tword.
8777 *
8778 * @returns Strict VBox status code.
8779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8780 * @param pr80Dst Where to return the tword.
8781 * @param iSegReg The index of the segment register to use for
8782 * this access. The base and limits are checked.
8783 * @param GCPtrMem The address of the guest memory.
8784 */
8785IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8786{
8787 /* The lazy approach for now... */
8788 PCRTFLOAT80U pr80Src;
8789 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8790 if (rc == VINF_SUCCESS)
8791 {
8792 *pr80Dst = *pr80Src;
8793 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8794 }
8795 return rc;
8796}
8797
8798
8799#ifdef IEM_WITH_SETJMP
8800/**
8801 * Fetches a data tword, longjmp on error.
8802 *
8803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8804 * @param pr80Dst Where to return the tword.
8805 * @param iSegReg The index of the segment register to use for
8806 * this access. The base and limits are checked.
8807 * @param GCPtrMem The address of the guest memory.
8808 */
8809DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8810{
8811 /* The lazy approach for now... */
8812 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8813 *pr80Dst = *pr80Src;
8814 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8815}
8816#endif
8817
8818
8819/**
8820 * Fetches a data dqword (double qword), generally SSE related.
8821 *
8822 * @returns Strict VBox status code.
8823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8824 * @param pu128Dst Where to return the double qword (dqword).
8825 * @param iSegReg The index of the segment register to use for
8826 * this access. The base and limits are checked.
8827 * @param GCPtrMem The address of the guest memory.
8828 */
8829IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8830{
8831 /* The lazy approach for now... */
8832 uint128_t const *pu128Src;
8833 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8834 if (rc == VINF_SUCCESS)
8835 {
8836 *pu128Dst = *pu128Src;
8837 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8838 }
8839 return rc;
8840}
8841
8842
8843#ifdef IEM_WITH_SETJMP
8844/**
8845 * Fetches a data dqword (double qword), generally SSE related.
8846 *
8847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8848 * @param pu128Dst Where to return the double qword (dqword).
8849 * @param iSegReg The index of the segment register to use for
8850 * this access. The base and limits are checked.
8851 * @param GCPtrMem The address of the guest memory.
8852 */
8853IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8854{
8855 /* The lazy approach for now... */
8856 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8857 *pu128Dst = *pu128Src;
8858 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8859}
8860#endif
8861
8862
8863/**
8864 * Fetches a data dqword (double qword) at an aligned address, generally SSE
8865 * related.
8866 *
8867 * Raises \#GP(0) if not aligned.
8868 *
8869 * @returns Strict VBox status code.
8870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8871 * @param pu128Dst Where to return the double qword (dqword).
8872 * @param iSegReg The index of the segment register to use for
8873 * this access. The base and limits are checked.
8874 * @param GCPtrMem The address of the guest memory.
8875 */
8876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8877{
8878 /* The lazy approach for now... */
8879 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8880 if ( (GCPtrMem & 15)
8881 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
8882 return iemRaiseGeneralProtectionFault0(pVCpu);
8883
8884 uint128_t const *pu128Src;
8885 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8886 if (rc == VINF_SUCCESS)
8887 {
8888 *pu128Dst = *pu128Src;
8889 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8890 }
8891 return rc;
8892}
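
/*
 * Example (sketch, not part of the build): the alignment rule enforced above,
 * written out as a stand-alone predicate.  A 16-byte misaligned SSE access
 * takes #GP(0) unless MXCSR.MM (AMD's misaligned SSE mode) is set; the helper
 * name is made up for the example.
 *
 *     static bool iemExampleSseAlignmentFaults(RTGCPTR GCPtrMem, uint32_t fMxCsr)
 *     {
 *         bool const fMisaligned        = (GCPtrMem & 15) != 0;
 *         bool const fMisalignedAllowed = (fMxCsr & X86_MXSCR_MM) != 0;
 *         return fMisaligned && !fMisalignedAllowed; // true -> raise #GP(0)
 *     }
 */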
8893
8894
8895#ifdef IEM_WITH_SETJMP
8896/**
8897 * Fetches a data dqword (double qword) at an aligned address, generally SSE
8898 * related, longjmp on error.
8899 *
8900 * Raises \#GP(0) if not aligned.
8901 *
8902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8903 * @param pu128Dst Where to return the double qword (dqword).
8904 * @param iSegReg The index of the segment register to use for
8905 * this access. The base and limits are checked.
8906 * @param GCPtrMem The address of the guest memory.
8907 */
8908DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8909{
8910 /* The lazy approach for now... */
8911 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8912 if ( (GCPtrMem & 15) == 0
8913 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
8914 {
8915 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
8916 IEM_ACCESS_DATA_R);
8917 *pu128Dst = *pu128Src;
8918 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8919 return;
8920 }
8921
8922 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
8923 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8924}
8925#endif
8926
8927
8928
8929/**
8930 * Fetches a descriptor register (lgdt, lidt).
8931 *
8932 * @returns Strict VBox status code.
8933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8934 * @param pcbLimit Where to return the limit.
8935 * @param pGCPtrBase Where to return the base.
8936 * @param iSegReg The index of the segment register to use for
8937 * this access. The base and limits are checked.
8938 * @param GCPtrMem The address of the guest memory.
8939 * @param enmOpSize The effective operand size.
8940 */
8941IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
8942 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
8943{
8944 /*
8945 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
8946 * little special:
8947 * - The two reads are done separately.
8948 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
8949 * - We suspect the 386 to actually commit the limit before the base in
8950 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
8951 * don't try to emulate this eccentric behavior, because it's not well
8952 * enough understood and rather hard to trigger.
8953 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
8954 */
8955 VBOXSTRICTRC rcStrict;
8956 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8957 {
8958 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8959 if (rcStrict == VINF_SUCCESS)
8960 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
8961 }
8962 else
8963 {
8964 uint32_t uTmp;
8965 if (enmOpSize == IEMMODE_32BIT)
8966 {
8967 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
8968 {
8969 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8970 if (rcStrict == VINF_SUCCESS)
8971 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8972 }
8973 else
8974 {
8975 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
8976 if (rcStrict == VINF_SUCCESS)
8977 {
8978 *pcbLimit = (uint16_t)uTmp;
8979 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8980 }
8981 }
8982 if (rcStrict == VINF_SUCCESS)
8983 *pGCPtrBase = uTmp;
8984 }
8985 else
8986 {
8987 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
8988 if (rcStrict == VINF_SUCCESS)
8989 {
8990 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
8991 if (rcStrict == VINF_SUCCESS)
8992 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
8993 }
8994 }
8995 }
8996 return rcStrict;
8997}
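
/*
 * Worked example (sketch) for the 16-bit operand size branch above, with
 * made-up memory contents at GCPtrMem: 7f 00 00 00 20 ff.  The limit word
 * reads as 0x007f and the dword at GCPtrMem+2 as 0xff200000; masking with
 * UINT32_C(0x00ffffff) yields a 24-bit base of 0x00200000, i.e. the top byte
 * is ignored just like a 16-bit LGDT on the 286/386.
 */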
8998
8999
9000
9001/**
9002 * Stores a data byte.
9003 *
9004 * @returns Strict VBox status code.
9005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9006 * @param iSegReg The index of the segment register to use for
9007 * this access. The base and limits are checked.
9008 * @param GCPtrMem The address of the guest memory.
9009 * @param u8Value The value to store.
9010 */
9011IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9012{
9013 /* The lazy approach for now... */
9014 uint8_t *pu8Dst;
9015 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9016 if (rc == VINF_SUCCESS)
9017 {
9018 *pu8Dst = u8Value;
9019 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9020 }
9021 return rc;
9022}
9023
9024
9025#ifdef IEM_WITH_SETJMP
9026/**
9027 * Stores a data byte, longjmp on error.
9028 *
9029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9030 * @param iSegReg The index of the segment register to use for
9031 * this access. The base and limits are checked.
9032 * @param GCPtrMem The address of the guest memory.
9033 * @param u8Value The value to store.
9034 */
9035IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9036{
9037 /* The lazy approach for now... */
9038 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9039 *pu8Dst = u8Value;
9040 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9041}
9042#endif
9043
9044
9045/**
9046 * Stores a data word.
9047 *
9048 * @returns Strict VBox status code.
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param iSegReg The index of the segment register to use for
9051 * this access. The base and limits are checked.
9052 * @param GCPtrMem The address of the guest memory.
9053 * @param u16Value The value to store.
9054 */
9055IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9056{
9057 /* The lazy approach for now... */
9058 uint16_t *pu16Dst;
9059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9060 if (rc == VINF_SUCCESS)
9061 {
9062 *pu16Dst = u16Value;
9063 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9064 }
9065 return rc;
9066}
9067
9068
9069#ifdef IEM_WITH_SETJMP
9070/**
9071 * Stores a data word, longjmp on error.
9072 *
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param iSegReg The index of the segment register to use for
9075 * this access. The base and limits are checked.
9076 * @param GCPtrMem The address of the guest memory.
9077 * @param u16Value The value to store.
9078 */
9079IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9080{
9081 /* The lazy approach for now... */
9082 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9083 *pu16Dst = u16Value;
9084 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9085}
9086#endif
9087
9088
9089/**
9090 * Stores a data dword.
9091 *
9092 * @returns Strict VBox status code.
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9094 * @param iSegReg The index of the segment register to use for
9095 * this access. The base and limits are checked.
9096 * @param GCPtrMem The address of the guest memory.
9097 * @param u32Value The value to store.
9098 */
9099IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9100{
9101 /* The lazy approach for now... */
9102 uint32_t *pu32Dst;
9103 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9104 if (rc == VINF_SUCCESS)
9105 {
9106 *pu32Dst = u32Value;
9107 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9108 }
9109 return rc;
9110}
9111
9112
9113#ifdef IEM_WITH_SETJMP
9114/**
9115 * Stores a data dword, longjmp on error.
9116 *
9118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9119 * @param iSegReg The index of the segment register to use for
9120 * this access. The base and limits are checked.
9121 * @param GCPtrMem The address of the guest memory.
9122 * @param u32Value The value to store.
9123 */
9124IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9125{
9126 /* The lazy approach for now... */
9127 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9128 *pu32Dst = u32Value;
9129 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9130}
9131#endif
9132
9133
9134/**
9135 * Stores a data qword.
9136 *
9137 * @returns Strict VBox status code.
9138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 * @param u64Value The value to store.
9143 */
9144IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9145{
9146 /* The lazy approach for now... */
9147 uint64_t *pu64Dst;
9148 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9149 if (rc == VINF_SUCCESS)
9150 {
9151 *pu64Dst = u64Value;
9152 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9153 }
9154 return rc;
9155}
9156
9157
9158#ifdef IEM_WITH_SETJMP
9159/**
9160 * Stores a data qword, longjmp on error.
9161 *
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 * @param u64Value The value to store.
9167 */
9168IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9169{
9170 /* The lazy approach for now... */
9171 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9172 *pu64Dst = u64Value;
9173 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9174}
9175#endif
9176
9177
9178/**
9179 * Stores a data dqword.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 * @param u128Value The value to store.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9189{
9190 /* The lazy approach for now... */
9191 uint128_t *pu128Dst;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu128Dst = u128Value;
9196 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203/**
9204 * Stores a data dqword, longjmp on error.
9205 *
9206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9207 * @param iSegReg The index of the segment register to use for
9208 * this access. The base and limits are checked.
9209 * @param GCPtrMem The address of the guest memory.
9210 * @param u128Value The value to store.
9211 */
9212IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9213{
9214 /* The lazy approach for now... */
9215 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9216 *pu128Dst = u128Value;
9217 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9218}
9219#endif
9220
9221
9222/**
9223 * Stores a data dqword, SSE aligned.
9224 *
9225 * @returns Strict VBox status code.
9226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9227 * @param iSegReg The index of the segment register to use for
9228 * this access. The base and limits are checked.
9229 * @param GCPtrMem The address of the guest memory.
9230 * @param u128Value The value to store.
9231 */
9232IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9233{
9234 /* The lazy approach for now... */
9235 if ( (GCPtrMem & 15)
9236 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9237 return iemRaiseGeneralProtectionFault0(pVCpu);
9238
9239 uint128_t *pu128Dst;
9240 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9241 if (rc == VINF_SUCCESS)
9242 {
9243 *pu128Dst = u128Value;
9244 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9245 }
9246 return rc;
9247}
9248
9249
9250#ifdef IEM_WITH_SETJMP
9251/**
9252 * Stores a data dqword, SSE aligned, longjmp on error.
9253 *
9255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9256 * @param iSegReg The index of the segment register to use for
9257 * this access. The base and limits are checked.
9258 * @param GCPtrMem The address of the guest memory.
9259 * @param u128Value The value to store.
9260 */
9261DECL_NO_INLINE(IEM_STATIC, void)
9262iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9263{
9264 /* The lazy approach for now... */
9265 if ( (GCPtrMem & 15) == 0
9266 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9267 {
9268 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9269 *pu128Dst = u128Value;
9270 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9271 return;
9272 }
9273
9274 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9275 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9276}
9277#endif
9278
9279
9280/**
9281 * Stores a descriptor register (sgdt, sidt).
9282 *
9283 * @returns Strict VBox status code.
9284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9285 * @param cbLimit The limit.
9286 * @param GCPtrBase The base address.
9287 * @param iSegReg The index of the segment register to use for
9288 * this access. The base and limits are checked.
9289 * @param GCPtrMem The address of the guest memory.
9290 */
9291IEM_STATIC VBOXSTRICTRC
9292iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9293{
9294 /*
9295 * The SIDT and SGDT instructions actually stores the data using two
9296 * The SIDT and SGDT instructions actually store the data using two
9297 * independent writes. The instructions do not respond to opsize prefixes.
9298 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9299 if (rcStrict == VINF_SUCCESS)
9300 {
9301 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9302 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9303 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9304 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9305 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9306 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9307 else
9308 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9309 }
9310 return rcStrict;
9311}
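
/*
 * Worked example (sketch) for the 16-bit mode branch above, with a made-up
 * GCPtrBase of 0x00200000: a 286-level target CPU stores the base dword as
 * 0xff200000 (top byte forced to 0xff), while later CPUs store 0x00200000.
 */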
9312
9313
9314/**
9315 * Pushes a word onto the stack.
9316 *
9317 * @returns Strict VBox status code.
9318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9319 * @param u16Value The value to push.
9320 */
9321IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9322{
9323 /* Decrement the stack pointer. */
9324 uint64_t uNewRsp;
9325 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9326 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9327
9328 /* Write the word the lazy way. */
9329 uint16_t *pu16Dst;
9330 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9331 if (rc == VINF_SUCCESS)
9332 {
9333 *pu16Dst = u16Value;
9334 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9335 }
9336
9337 /* Commit the new RSP value unless an access handler made trouble. */
9338 if (rc == VINF_SUCCESS)
9339 pCtx->rsp = uNewRsp;
9340
9341 return rc;
9342}
9343
9344
9345/**
9346 * Pushes a dword onto the stack.
9347 *
9348 * @returns Strict VBox status code.
9349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9350 * @param u32Value The value to push.
9351 */
9352IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9353{
9354 /* Decrement the stack pointer. */
9355 uint64_t uNewRsp;
9356 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9357 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9358
9359 /* Write the dword the lazy way. */
9360 uint32_t *pu32Dst;
9361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9362 if (rc == VINF_SUCCESS)
9363 {
9364 *pu32Dst = u32Value;
9365 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9366 }
9367
9368 /* Commit the new RSP value unless an access handler made trouble. */
9369 if (rc == VINF_SUCCESS)
9370 pCtx->rsp = uNewRsp;
9371
9372 return rc;
9373}
9374
9375
9376/**
9377 * Pushes a dword segment register value onto the stack.
9378 *
9379 * @returns Strict VBox status code.
9380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9381 * @param u32Value The value to push.
9382 */
9383IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9384{
9385 /* Decrement the stack pointer. */
9386 uint64_t uNewRsp;
9387 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9388 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9389
9390 VBOXSTRICTRC rc;
9391 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9392 {
9393 /* The recompiler writes a full dword. */
9394 uint32_t *pu32Dst;
9395 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9396 if (rc == VINF_SUCCESS)
9397 {
9398 *pu32Dst = u32Value;
9399 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9400 }
9401 }
9402 else
9403 {
9404 /* The Intel docs talk about zero extending the selector register
9405 value. The actual Intel CPU used here might be zero extending the value,
9406 but it still only writes the lower word... */
9407 /** @todo Test this on newer HW and on AMD and in 64-bit mode. Also test what
9408 * happens when crossing an electric page boundary: is the high word checked
9409 * for write accessibility or not? Probably it is. What about segment limits?
9410 * It appears this behavior is also shared with trap error codes.
9411 *
9412 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
9413 * on ancient hardware when it actually did change. */
9414 uint16_t *pu16Dst;
9415 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9416 if (rc == VINF_SUCCESS)
9417 {
9418 *pu16Dst = (uint16_t)u32Value;
9419 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9420 }
9421 }
9422
9423 /* Commit the new RSP value unless an access handler made trouble. */
9424 if (rc == VINF_SUCCESS)
9425 pCtx->rsp = uNewRsp;
9426
9427 return rc;
9428}
9429
9430
9431/**
9432 * Pushes a qword onto the stack.
9433 *
9434 * @returns Strict VBox status code.
9435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9436 * @param u64Value The value to push.
9437 */
9438IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9439{
9440 /* Decrement the stack pointer. */
9441 uint64_t uNewRsp;
9442 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9443 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9444
9445 /* Write the qword the lazy way. */
9446 uint64_t *pu64Dst;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu64Dst = u64Value;
9451 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9452 }
9453
9454 /* Commit the new RSP value unless an access handler made trouble. */
9455 if (rc == VINF_SUCCESS)
9456 pCtx->rsp = uNewRsp;
9457
9458 return rc;
9459}
9460
9461
9462/**
9463 * Pops a word from the stack.
9464 *
9465 * @returns Strict VBox status code.
9466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9467 * @param pu16Value Where to store the popped value.
9468 */
9469IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9470{
9471 /* Increment the stack pointer. */
9472 uint64_t uNewRsp;
9473 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9474 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9475
9476 /* Fetch the word the lazy way. */
9477 uint16_t const *pu16Src;
9478 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9479 if (rc == VINF_SUCCESS)
9480 {
9481 *pu16Value = *pu16Src;
9482 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9483
9484 /* Commit the new RSP value. */
9485 if (rc == VINF_SUCCESS)
9486 pCtx->rsp = uNewRsp;
9487 }
9488
9489 return rc;
9490}
9491
9492
9493/**
9494 * Pops a dword from the stack.
9495 *
9496 * @returns Strict VBox status code.
9497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9498 * @param pu32Value Where to store the popped value.
9499 */
9500IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9501{
9502 /* Increment the stack pointer. */
9503 uint64_t uNewRsp;
9504 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9505 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9506
9507 /* Fetch the dword the lazy way. */
9508 uint32_t const *pu32Src;
9509 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9510 if (rc == VINF_SUCCESS)
9511 {
9512 *pu32Value = *pu32Src;
9513 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9514
9515 /* Commit the new RSP value. */
9516 if (rc == VINF_SUCCESS)
9517 pCtx->rsp = uNewRsp;
9518 }
9519
9520 return rc;
9521}
9522
9523
9524/**
9525 * Pops a qword from the stack.
9526 *
9527 * @returns Strict VBox status code.
9528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9529 * @param pu64Value Where to store the popped value.
9530 */
9531IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9532{
9533 /* Increment the stack pointer. */
9534 uint64_t uNewRsp;
9535 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9536 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9537
9538 /* Fetch the qword the lazy way. */
9539 uint64_t const *pu64Src;
9540 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9541 if (rc == VINF_SUCCESS)
9542 {
9543 *pu64Value = *pu64Src;
9544 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9545
9546 /* Commit the new RSP value. */
9547 if (rc == VINF_SUCCESS)
9548 pCtx->rsp = uNewRsp;
9549 }
9550
9551 return rc;
9552}
9553
9554
9555/**
9556 * Pushes a word onto the stack, using a temporary stack pointer.
9557 *
9558 * @returns Strict VBox status code.
9559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9560 * @param u16Value The value to push.
9561 * @param pTmpRsp Pointer to the temporary stack pointer.
9562 */
9563IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9564{
9565 /* Decrement the stack pointer. */
9566 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9567 RTUINT64U NewRsp = *pTmpRsp;
9568 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9569
9570 /* Write the word the lazy way. */
9571 uint16_t *pu16Dst;
9572 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9573 if (rc == VINF_SUCCESS)
9574 {
9575 *pu16Dst = u16Value;
9576 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9577 }
9578
9579 /* Commit the new RSP value unless an access handler made trouble. */
9580 if (rc == VINF_SUCCESS)
9581 *pTmpRsp = NewRsp;
9582
9583 return rc;
9584}
9585
9586
9587/**
9588 * Pushes a dword onto the stack, using a temporary stack pointer.
9589 *
9590 * @returns Strict VBox status code.
9591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9592 * @param u32Value The value to push.
9593 * @param pTmpRsp Pointer to the temporary stack pointer.
9594 */
9595IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9596{
9597 /* Decrement the stack pointer. */
9598 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9599 RTUINT64U NewRsp = *pTmpRsp;
9600 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9601
9602 /* Write the dword the lazy way. */
9603 uint32_t *pu32Dst;
9604 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9605 if (rc == VINF_SUCCESS)
9606 {
9607 *pu32Dst = u32Value;
9608 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9609 }
9610
9611 /* Commit the new RSP value unless an access handler made trouble. */
9612 if (rc == VINF_SUCCESS)
9613 *pTmpRsp = NewRsp;
9614
9615 return rc;
9616}
9617
9618
9619/**
9620 * Pushes a qword onto the stack, using a temporary stack pointer.
9621 *
9622 * @returns Strict VBox status code.
9623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9624 * @param u64Value The value to push.
9625 * @param pTmpRsp Pointer to the temporary stack pointer.
9626 */
9627IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9628{
9629 /* Decrement the stack pointer. */
9630 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9631 RTUINT64U NewRsp = *pTmpRsp;
9632 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9633
9634 /* Write the qword the lazy way. */
9635 uint64_t *pu64Dst;
9636 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9637 if (rc == VINF_SUCCESS)
9638 {
9639 *pu64Dst = u64Value;
9640 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9641 }
9642
9643 /* Commit the new RSP value unless an access handler made trouble. */
9644 if (rc == VINF_SUCCESS)
9645 *pTmpRsp = NewRsp;
9646
9647 return rc;
9648}
9649
9650
9651/**
9652 * Pops a word from the stack, using a temporary stack pointer.
9653 *
9654 * @returns Strict VBox status code.
9655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9656 * @param pu16Value Where to store the popped value.
9657 * @param pTmpRsp Pointer to the temporary stack pointer.
9658 */
9659IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9660{
9661 /* Increment the stack pointer. */
9662 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9663 RTUINT64U NewRsp = *pTmpRsp;
9664 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9665
9666 /* Fetch the word the lazy way. */
9667 uint16_t const *pu16Src;
9668 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9669 if (rc == VINF_SUCCESS)
9670 {
9671 *pu16Value = *pu16Src;
9672 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9673
9674 /* Commit the new RSP value. */
9675 if (rc == VINF_SUCCESS)
9676 *pTmpRsp = NewRsp;
9677 }
9678
9679 return rc;
9680}
9681
9682
9683/**
9684 * Pops a dword from the stack, using a temporary stack pointer.
9685 *
9686 * @returns Strict VBox status code.
9687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9688 * @param pu32Value Where to store the popped value.
9689 * @param pTmpRsp Pointer to the temporary stack pointer.
9690 */
9691IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9692{
9693 /* Increment the stack pointer. */
9694 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9695 RTUINT64U NewRsp = *pTmpRsp;
9696 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9697
9698 /* Fetch the dword the lazy way. */
9699 uint32_t const *pu32Src;
9700 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9701 if (rc == VINF_SUCCESS)
9702 {
9703 *pu32Value = *pu32Src;
9704 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9705
9706 /* Commit the new RSP value. */
9707 if (rc == VINF_SUCCESS)
9708 *pTmpRsp = NewRsp;
9709 }
9710
9711 return rc;
9712}
9713
9714
9715/**
9716 * Pops a qword from the stack, using a temporary stack pointer.
9717 *
9718 * @returns Strict VBox status code.
9719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9720 * @param pu64Value Where to store the popped value.
9721 * @param pTmpRsp Pointer to the temporary stack pointer.
9722 */
9723IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9724{
9725 /* Increment the stack pointer. */
9726 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9727 RTUINT64U NewRsp = *pTmpRsp;
9728 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9729
9730 /* Fetch the qword the lazy way. */
9731 uint64_t const *pu64Src;
9732 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9733 if (rcStrict == VINF_SUCCESS)
9734 {
9735 *pu64Value = *pu64Src;
9736 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9737
9738 /* Commit the new RSP value. */
9739 if (rcStrict == VINF_SUCCESS)
9740 *pTmpRsp = NewRsp;
9741 }
9742
9743 return rcStrict;
9744}
9745
9746
9747/**
9748 * Begin a special stack push (used by interrupt, exceptions and such).
9749 *
9750 * This will raise \#SS or \#PF if appropriate.
9751 *
9752 * @returns Strict VBox status code.
9753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9754 * @param cbMem The number of bytes to push onto the stack.
9755 * @param ppvMem Where to return the pointer to the stack memory.
9756 * As with the other memory functions this could be
9757 * direct access or bounce buffered access, so
9758 * don't commit register until the commit call
9759 * succeeds.
9760 * @param puNewRsp Where to return the new RSP value. This must be
9761 * passed unchanged to
9762 * iemMemStackPushCommitSpecial().
9763 */
9764IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
9765{
9766 Assert(cbMem < UINT8_MAX);
9767 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9768 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9769 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9770}
9771
9772
9773/**
9774 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
9775 *
9776 * This will update the rSP.
9777 *
9778 * @returns Strict VBox status code.
9779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9780 * @param pvMem The pointer returned by
9781 * iemMemStackPushBeginSpecial().
9782 * @param uNewRsp The new RSP value returned by
9783 * iemMemStackPushBeginSpecial().
9784 */
9785IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
9786{
9787 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
9788 if (rcStrict == VINF_SUCCESS)
9789 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9790 return rcStrict;
9791}
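
/* A sketch of how the begin/commit pair is intended to be used, e.g. for
 * pushing a multi-word frame as one unit (uIp, uCs and fEfl are hypothetical
 * placeholders, not values taken from the surrounding code):
 *
 *     uint64_t  uNewRsp;
 *     uint16_t *pau16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint16_t),
 *                                                         (void **)&pau16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pau16Frame[0] = uIp;
 *     pau16Frame[1] = uCs;
 *     pau16Frame[2] = fEfl;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau16Frame, uNewRsp);
 *
 * Nothing is made guest visible and RSP is left alone until the commit call
 * succeeds, which is what makes this suitable for exception frames. */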
9792
9793
9794/**
9795 * Begin a special stack pop (used by iret, retf and such).
9796 *
9797 * This will raise \#SS or \#PF if appropriate.
9798 *
9799 * @returns Strict VBox status code.
9800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9801 * @param cbMem The number of bytes to pop from the stack.
9802 * @param ppvMem Where to return the pointer to the stack memory.
9803 * @param puNewRsp Where to return the new RSP value. This must be
9804 * passed unchanged to
9805 * iemMemStackPopCommitSpecial() or applied
9806 * manually if iemMemStackPopDoneSpecial() is used.
9807 */
9808IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9809{
9810 Assert(cbMem < UINT8_MAX);
9811 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9812 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9813 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9814}
9815
9816
9817/**
9818 * Continue a special stack pop (used by iret and retf).
9819 *
9820 * This will raise \#SS or \#PF if appropriate.
9821 *
9822 * @returns Strict VBox status code.
9823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9824 * @param cbMem The number of bytes to pop from the stack.
9825 * @param ppvMem Where to return the pointer to the stack memory.
9826 * @param puNewRsp Where to return the new RSP value. This must be
9827 * passed unchanged to
9828 * iemMemStackPopCommitSpecial() or applied
9829 * manually if iemMemStackPopDoneSpecial() is used.
9830 */
9831IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9832{
9833 Assert(cbMem < UINT8_MAX);
9834 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9835 RTUINT64U NewRsp;
9836 NewRsp.u = *puNewRsp;
9837 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9838 *puNewRsp = NewRsp.u;
9839 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9840}
9841
9842
9843/**
9844 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
9845 *
9846 * This will update the rSP.
9847 *
9848 * @returns Strict VBox status code.
9849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9850 * @param pvMem The pointer returned by
9851 * iemMemStackPopBeginSpecial().
9852 * @param uNewRsp The new RSP value returned by
9853 * iemMemStackPopBeginSpecial().
9854 */
9855IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PVMCPU pVCpu, void const *pvMem, uint64_t uNewRsp)
9856{
9857 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9858 if (rcStrict == VINF_SUCCESS)
9859 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9860 return rcStrict;
9861}
9862
9863
9864/**
9865 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
9866 * iemMemStackPopContinueSpecial).
9867 *
9868 * The caller will manually commit the rSP.
9869 *
9870 * @returns Strict VBox status code.
9871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9872 * @param pvMem The pointer returned by
9873 * iemMemStackPopBeginSpecial() or
9874 * iemMemStackPopContinueSpecial().
9875 */
9876IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
9877{
9878 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9879}
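
/* A sketch of the matching pop protocol for an iret/retf style caller
 * (uFrameIp, uFrameCs and uFrameFlags are hypothetical placeholders):
 *
 *     uint64_t        uNewRsp;
 *     uint16_t const *pau16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint16_t),
 *                                                        (void const **)&pau16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uFrameIp    = pau16Frame[0];
 *     uint16_t const uFrameCs    = pau16Frame[1];
 *     uint16_t const uFrameFlags = pau16Frame[2];
 *     rcStrict = iemMemStackPopCommitSpecial(pVCpu, pau16Frame, uNewRsp);
 *
 * Callers that must validate the popped values before moving RSP (or that
 * need iemMemStackPopContinueSpecial for a second chunk) use
 * iemMemStackPopDoneSpecial instead of the commit and apply uNewRsp
 * themselves once everything checks out. */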
9880
9881
9882/**
9883 * Fetches a system table byte.
9884 *
9885 * @returns Strict VBox status code.
9886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9887 * @param pbDst Where to return the byte.
9888 * @param iSegReg The index of the segment register to use for
9889 * this access. The base and limits are checked.
9890 * @param GCPtrMem The address of the guest memory.
9891 */
9892IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9893{
9894 /* The lazy approach for now... */
9895 uint8_t const *pbSrc;
9896 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
9897 if (rc == VINF_SUCCESS)
9898 {
9899 *pbDst = *pbSrc;
9900 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
9901 }
9902 return rc;
9903}
9904
9905
9906/**
9907 * Fetches a system table word.
9908 *
9909 * @returns Strict VBox status code.
9910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9911 * @param pu16Dst Where to return the word.
9912 * @param iSegReg The index of the segment register to use for
9913 * this access. The base and limits are checked.
9914 * @param GCPtrMem The address of the guest memory.
9915 */
9916IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9917{
9918 /* The lazy approach for now... */
9919 uint16_t const *pu16Src;
9920 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
9921 if (rc == VINF_SUCCESS)
9922 {
9923 *pu16Dst = *pu16Src;
9924 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
9925 }
9926 return rc;
9927}
9928
9929
9930/**
9931 * Fetches a system table dword.
9932 *
9933 * @returns Strict VBox status code.
9934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9935 * @param pu32Dst Where to return the dword.
9936 * @param iSegReg The index of the segment register to use for
9937 * this access. The base and limits are checked.
9938 * @param GCPtrMem The address of the guest memory.
9939 */
9940IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9941{
9942 /* The lazy approach for now... */
9943 uint32_t const *pu32Src;
9944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
9945 if (rc == VINF_SUCCESS)
9946 {
9947 *pu32Dst = *pu32Src;
9948 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
9949 }
9950 return rc;
9951}
9952
9953
9954/**
9955 * Fetches a system table qword.
9956 *
9957 * @returns Strict VBox status code.
9958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9959 * @param pu64Dst Where to return the qword.
9960 * @param iSegReg The index of the segment register to use for
9961 * this access. The base and limits are checked.
9962 * @param GCPtrMem The address of the guest memory.
9963 */
9964IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9965{
9966 /* The lazy approach for now... */
9967 uint64_t const *pu64Src;
9968 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
9969 if (rc == VINF_SUCCESS)
9970 {
9971 *pu64Dst = *pu64Src;
9972 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
9973 }
9974 return rc;
9975}
9976
9977
9978/**
9979 * Fetches a descriptor table entry with caller specified error code.
9980 *
9981 * @returns Strict VBox status code.
9982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9983 * @param pDesc Where to return the descriptor table entry.
9984 * @param uSel The selector which table entry to fetch.
9985 * @param uXcpt The exception to raise on table lookup error.
9986 * @param uErrorCode The error code associated with the exception.
9987 */
9988IEM_STATIC VBOXSTRICTRC
9989iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
9990{
9991 AssertPtr(pDesc);
9992 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9993
9994 /** @todo did the 286 require all 8 bytes to be accessible? */
9995 /*
9996 * Get the selector table base and check bounds.
9997 */
9998 RTGCPTR GCPtrBase;
9999 if (uSel & X86_SEL_LDT)
10000 {
10001 if ( !pCtx->ldtr.Attr.n.u1Present
10002 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10003 {
10004 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10005 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10006 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10007 uErrorCode, 0);
10008 }
10009
10010 Assert(pCtx->ldtr.Attr.n.u1Present);
10011 GCPtrBase = pCtx->ldtr.u64Base;
10012 }
10013 else
10014 {
10015 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10016 {
10017 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10018 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10019 uErrorCode, 0);
10020 }
10021 GCPtrBase = pCtx->gdtr.pGdt;
10022 }
10023
10024 /*
10025 * Read the legacy descriptor and maybe the long mode extensions if
10026 * required.
10027 */
10028 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10029 if (rcStrict == VINF_SUCCESS)
10030 {
10031 if ( !IEM_IS_LONG_MODE(pVCpu)
10032 || pDesc->Legacy.Gen.u1DescType)
10033 pDesc->Long.au64[1] = 0;
10034 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10035 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10036 else
10037 {
10038 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10039 /** @todo is this the right exception? */
10040 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10041 }
10042 }
10043 return rcStrict;
10044}
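
/* For reference, the selector decomposition the bounds checks above rely on,
 * sketched with the X86_SEL_* constants:
 *
 *     uint32_t const offDesc = uSel & X86_SEL_MASK;         // index * 8 (uSel & 0xfff8)
 *     bool     const fLdt    = RT_BOOL(uSel & X86_SEL_LDT); // table indicator, bit 2
 *     uint8_t  const uRpl    = uSel & X86_SEL_RPL;          // requested privilege, bits 0-1
 *
 * (uSel | X86_SEL_RPL_LDT) is thus the offset of the last byte of the 8-byte
 * descriptor, which is what gets compared against the table limit above. */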
10045
10046
10047/**
10048 * Fetches a descriptor table entry.
10049 *
10050 * @returns Strict VBox status code.
10051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10052 * @param pDesc Where to return the descriptor table entry.
10053 * @param uSel The selector which table entry to fetch.
10054 * @param uXcpt The exception to raise on table lookup error.
10055 */
10056IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10057{
10058 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10059}
10060
10061
10062/**
10063 * Fakes a long mode stack selector for SS = 0.
10064 *
10065 * @param pDescSs Where to return the fake stack descriptor.
10066 * @param uDpl The DPL we want.
10067 */
10068IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10069{
10070 pDescSs->Long.au64[0] = 0;
10071 pDescSs->Long.au64[1] = 0;
10072 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10073 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10074 pDescSs->Long.Gen.u2Dpl = uDpl;
10075 pDescSs->Long.Gen.u1Present = 1;
10076 pDescSs->Long.Gen.u1Long = 1;
10077}
10078
10079
10080/**
10081 * Marks the selector descriptor as accessed (only non-system descriptors).
10082 *
10083 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10084 * will therefore skip the limit checks.
10085 *
10086 * @returns Strict VBox status code.
10087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10088 * @param uSel The selector.
10089 */
10090IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10091{
10092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10093
10094 /*
10095 * Get the selector table base and calculate the entry address.
10096 */
10097 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10098 ? pCtx->ldtr.u64Base
10099 : pCtx->gdtr.pGdt;
10100 GCPtr += uSel & X86_SEL_MASK;
10101
10102 /*
10103 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10104 * ugly stuff to avoid this. This will make sure it's an atomic access
10105 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10106 */
10107 VBOXSTRICTRC rcStrict;
10108 uint32_t volatile *pu32;
10109 if ((GCPtr & 3) == 0)
10110 {
10111 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10112 GCPtr += 2 + 2;
10113 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10114 if (rcStrict != VINF_SUCCESS)
10115 return rcStrict;
10116 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10117 }
10118 else
10119 {
10120 /* The misaligned GDT/LDT case, map the whole thing. */
10121 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10122 if (rcStrict != VINF_SUCCESS)
10123 return rcStrict;
10124 switch ((uintptr_t)pu32 & 3)
10125 {
10126 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10127 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10128 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10129 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10130 }
10131 }
10132
10133 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10134}
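
/* The bit arithmetic above in a nutshell: the accessed flag is bit 0 of the
 * type field in descriptor byte 5, i.e. bit 40 of the 8-byte descriptor.  In
 * the aligned case the dword at byte offset 4 is mapped, putting the flag at
 * bit 40 - 32 = 8.  In the misaligned case the whole descriptor is mapped and
 * the pointer is advanced to the next host-dword-aligned byte, so the same
 * absolute bit 40 becomes:
 *
 *     +0 bytes -> bit 40,   +1 -> bit 32,   +2 -> bit 24,   +3 -> bit 16   */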
10135
10136/** @} */
10137
10138
10139/*
10140 * Include the C/C++ implementation of instruction.
10141 */
10142#include "IEMAllCImpl.cpp.h"
10143
10144
10145
10146/** @name "Microcode" macros.
10147 *
10148 * The idea is that we should be able to use the same code to interpret
10149 * instructions as well as to recompile them. Thus this obfuscation.
10150 *
10151 * @{
10152 */
10153#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10154#define IEM_MC_END() }
10155#define IEM_MC_PAUSE() do {} while (0)
10156#define IEM_MC_CONTINUE() do {} while (0)
10157
10158/** Internal macro. */
10159#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10160 do \
10161 { \
10162 VBOXSTRICTRC rcStrict2 = a_Expr; \
10163 if (rcStrict2 != VINF_SUCCESS) \
10164 return rcStrict2; \
10165 } while (0)
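
/* A sketch of how these macros are consumed, here for a hypothetical 16-bit
 * register-to-register ADD template (bRmReg and bRmRm stand in for the values
 * a real decoder extracts from the ModR/M byte; IEM_MC_CALL_VOID_AIMPL_3 and
 * the iemAImpl_add_u16 worker live elsewhere in IEM):
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *     IEM_MC_ARG(uint16_t,   u16Src,   1);
 *     IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *     IEM_MC_REF_GREG_U16(pu16Dst, bRmReg);
 *     IEM_MC_FETCH_GREG_U16(u16Src, bRmRm);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * When interpreting, the template expands straight into the C statements the
 * macros below define; the same template can later be given a different
 * expansion by a recompiler front-end. */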
10166
10167
10168#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10169#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10170#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10171#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10172#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10173#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10174#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10175#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10176#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10177 do { \
10178 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10179 return iemRaiseDeviceNotAvailable(pVCpu); \
10180 } while (0)
10181#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10182 do { \
10183 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10184 return iemRaiseMathFault(pVCpu); \
10185 } while (0)
10186#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10187 do { \
10188 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10189 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10190 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10191 return iemRaiseUndefinedOpcode(pVCpu); \
10192 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10193 return iemRaiseDeviceNotAvailable(pVCpu); \
10194 } while (0)
10195#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10196 do { \
10197 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10198 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10199 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10200 return iemRaiseUndefinedOpcode(pVCpu); \
10201 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10202 return iemRaiseDeviceNotAvailable(pVCpu); \
10203 } while (0)
10204#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10205 do { \
10206 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10207 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10208 return iemRaiseUndefinedOpcode(pVCpu); \
10209 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10210 return iemRaiseDeviceNotAvailable(pVCpu); \
10211 } while (0)
10212#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10213 do { \
10214 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10215 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10216 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10217 return iemRaiseUndefinedOpcode(pVCpu); \
10218 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10219 return iemRaiseDeviceNotAvailable(pVCpu); \
10220 } while (0)
10221#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10222 do { \
10223 if (pVCpu->iem.s.uCpl != 0) \
10224 return iemRaiseGeneralProtectionFault0(pVCpu); \
10225 } while (0)
10226
10227
10228#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10229#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10230#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10231#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10232#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10233#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10234#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10235 uint32_t a_Name; \
10236 uint32_t *a_pName = &a_Name
10237#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10238 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10239
10240#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10241#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10242
10243#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10244#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10245#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10246#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10247#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10248#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10249#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10250#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10251#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10252#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10253#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10254#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10255#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10256#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10257#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10258#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10259#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10260#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10261#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10262#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10263#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10264#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10265#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10266#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10267#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10268#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10269#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10270#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10271#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10272/** @note Not for IOPL or IF testing or modification. */
10273#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10274#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10275#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10276#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10277
10278#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10279#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10280#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10281#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10282#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10283#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10284#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10285#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10286#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10287#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10288#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10289 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10290
10291#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10292#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10293/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10294 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10295#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10296#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10297/** @note Not for IOPL or IF testing or modification. */
10298#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10299
10300#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10301#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10302#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10303 do { \
10304 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10305 *pu32Reg += (a_u32Value); \
10306 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
10307 } while (0)
10308#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10309
10310#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10311#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10312#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10313 do { \
10314 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10315 *pu32Reg -= (a_u32Value); \
10316 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
10317 } while (0)
10318#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10319#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10320
10321#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10322#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10323#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10324#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10325#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10326#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10327#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10328
10329#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10330#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10331#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10332#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10333
10334#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10335#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10336#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10337
10338#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10339#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10340#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10341
10342#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10343#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10344#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10345
10346#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10347#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10348#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10349
10350#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10351
10352#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10353
10354#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10355#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10356#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10357 do { \
10358 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10359 *pu32Reg &= (a_u32Value); \
10360 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
10361 } while (0)
10362#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10363
10364#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10365#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10366#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10367 do { \
10368 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10369 *pu32Reg |= (a_u32Value); \
10370 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
10371 } while (0)
10372#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10373
10374
10375/** @note Not for IOPL or IF modification. */
10376#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10377/** @note Not for IOPL or IF modification. */
10378#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10379/** @note Not for IOPL or IF modification. */
10380#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10381
10382#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10383
10384
10385#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10386 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10387#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10388 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10389#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10390 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10391#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10392 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10393#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10394 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10395#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10396 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10397#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10398 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10399
10400#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10401 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10402#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10403 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10404#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10405 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10406#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10407 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10408#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10409 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10410#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10411 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10412 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10413 } while (0)
10414#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10415 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10416 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10417 } while (0)
10418#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10419 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10420#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10421 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10422#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10423 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10424#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10425 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10426 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10427
10428#ifndef IEM_WITH_SETJMP
10429# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10430 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10431# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10432 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10433# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10434 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10435#else
10436# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10437 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10438# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10439 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10440# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10441 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10442#endif
10443
10444#ifndef IEM_WITH_SETJMP
10445# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10446 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10447# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10448 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10449# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10450 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10451#else
10452# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10453 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10454# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10455 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10456# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10457 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10458#endif
10459
10460#ifndef IEM_WITH_SETJMP
10461# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10462 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10463# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10464 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10465# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10466 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10467#else
10468# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10469 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10470# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10471 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10472# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10473 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10474#endif
10475
10476#ifdef SOME_UNUSED_FUNCTION
10477# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10478 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10479#endif
10480
10481#ifndef IEM_WITH_SETJMP
10482# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10483 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10484# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10485 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10486# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10487 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10488# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10489 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10490#else
10491# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10492 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10493# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10494 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10495# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10496 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10497# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10498 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10499#endif
10500
10501#ifndef IEM_WITH_SETJMP
10502# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10503 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10504# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10505 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10506# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10507 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10508#else
10509# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10510 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10511# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10512 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10513# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10514 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10515#endif
10516
10517#ifndef IEM_WITH_SETJMP
10518# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10519 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10520# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10521 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10522#else
10523# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10524 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10525# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10526 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10527#endif
10528
10529
10530
10531#ifndef IEM_WITH_SETJMP
10532# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10533 do { \
10534 uint8_t u8Tmp; \
10535 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10536 (a_u16Dst) = u8Tmp; \
10537 } while (0)
10538# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10539 do { \
10540 uint8_t u8Tmp; \
10541 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10542 (a_u32Dst) = u8Tmp; \
10543 } while (0)
10544# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10545 do { \
10546 uint8_t u8Tmp; \
10547 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10548 (a_u64Dst) = u8Tmp; \
10549 } while (0)
10550# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10551 do { \
10552 uint16_t u16Tmp; \
10553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10554 (a_u32Dst) = u16Tmp; \
10555 } while (0)
10556# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10557 do { \
10558 uint16_t u16Tmp; \
10559 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10560 (a_u64Dst) = u16Tmp; \
10561 } while (0)
10562# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10563 do { \
10564 uint32_t u32Tmp; \
10565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10566 (a_u64Dst) = u32Tmp; \
10567 } while (0)
10568#else /* IEM_WITH_SETJMP */
10569# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10570 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10571# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10572 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10573# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10574 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10575# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10576 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10577# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10578 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10579# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10580 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10581#endif /* IEM_WITH_SETJMP */
10582
10583#ifndef IEM_WITH_SETJMP
10584# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10585 do { \
10586 uint8_t u8Tmp; \
10587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10588 (a_u16Dst) = (int8_t)u8Tmp; \
10589 } while (0)
10590# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10591 do { \
10592 uint8_t u8Tmp; \
10593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10594 (a_u32Dst) = (int8_t)u8Tmp; \
10595 } while (0)
10596# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10597 do { \
10598 uint8_t u8Tmp; \
10599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10600 (a_u64Dst) = (int8_t)u8Tmp; \
10601 } while (0)
10602# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10603 do { \
10604 uint16_t u16Tmp; \
10605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10606 (a_u32Dst) = (int16_t)u16Tmp; \
10607 } while (0)
10608# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10609 do { \
10610 uint16_t u16Tmp; \
10611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10612 (a_u64Dst) = (int16_t)u16Tmp; \
10613 } while (0)
10614# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10615 do { \
10616 uint32_t u32Tmp; \
10617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10618 (a_u64Dst) = (int32_t)u32Tmp; \
10619 } while (0)
10620#else /* IEM_WITH_SETJMP */
10621# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10622 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10623# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10624 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10625# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10626 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10627# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10628 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10629# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10630 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10631# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10632 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10633#endif /* IEM_WITH_SETJMP */
10634
10635#ifndef IEM_WITH_SETJMP
10636# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10637 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10638# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10639 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10640# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10641 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10642# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10643 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10644#else
10645# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10646 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10647# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10648 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10649# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10650 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10651# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10652 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10653#endif
10654
10655#ifndef IEM_WITH_SETJMP
10656# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10657 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10658# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10659 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10660# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10661 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10662# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10663 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10664#else
10665# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10666 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10667# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10668 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10669# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10670 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10671# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10672 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10673#endif
10674
10675#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10676#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10677#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10678#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10679#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10680#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10681#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10682 do { \
10683 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10684 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10685 } while (0)
10686
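/* The negative QNaN patterns above follow the IEEE-754 encodings: sign bit set,
 * all exponent bits set, and the most significant fraction bit set ("quiet").
 * For illustration:
 * @code
 *      // R32: sign=1, exp=0xff,  frac=0x400000          -> 0xffc00000
 *      // R64: sign=1, exp=0x7ff, frac=0x8000000000000   -> 0xfff8000000000000
 *      // R80: sign+exp = 0xffff, mantissa = 0xc000000000000000 (integer bit + quiet bit)
 * @endcode
 */
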
10687#ifndef IEM_WITH_SETJMP
10688# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10689 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10690# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10691 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10692#else
10693# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10694 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10695# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10696 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10697#endif
10698
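/* The fetch and store macros above are meant to be used from inside an IEM_MC
 * block in an opcode worker.  A rough, hypothetical sketch of a 16-bit
 * register-to-memory store (IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16,
 * IEM_MC_ADVANCE_RIP and IEM_MC_END are defined elsewhere in this file):
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Value, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
 *      IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */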
10699
10700#define IEM_MC_PUSH_U16(a_u16Value) \
10701 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10702#define IEM_MC_PUSH_U32(a_u32Value) \
10703 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10704#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10705 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10706#define IEM_MC_PUSH_U64(a_u64Value) \
10707 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10708
10709#define IEM_MC_POP_U16(a_pu16Value) \
10710 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10711#define IEM_MC_POP_U32(a_pu32Value) \
10712 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10713#define IEM_MC_POP_U64(a_pu64Value) \
10714 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10715
10716/** Maps guest memory for direct or bounce buffered access.
10717 * The purpose is to pass the mapped memory to an operand implementation, hence the @a a_iArg parameter.
10718 * @remarks May return.
10719 */
10720#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10721 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10722
10723/** Maps guest memory for direct or bounce buffered access.
10724 * The purpose is to pass the mapped memory to an operand implementation, hence the @a a_iArg parameter.
10725 * @remarks May return.
10726 */
10727#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10728 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10729
10730/** Commits the memory and unmaps the guest memory.
10731 * @remarks May return.
10732 */
10733#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10734 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10735
10736/** Commits the memory and unmaps the guest memory unless the FPU status word
10737 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
10738 * that would cause the store not to be performed.
10739 *
10740 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10741 * store, while \#P will not.
10742 *
10743 * @remarks May in theory return - for now.
10744 */
10745#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10746 do { \
10747 if ( !(a_u16FSW & X86_FSW_ES) \
10748 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10749 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10750 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10751 } while (0)
10752
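/* Worked example of the commit-for-FPU-store condition above, assuming the
 * guest has unmasked invalid-operation exceptions (FCW.IM clear) and the store
 * raised one (FSW.ES and FSW.IE set):
 * @code
 *      uint16_t const u16FSW = X86_FSW_ES | X86_FSW_IE;
 *      uint16_t const u16FCW = X86_FCW_MASK_ALL & ~X86_FCW_IM;
 *      // X86_FSW_ES is set and X86_FSW_IE survives the & ~(FCW & X86_FCW_MASK_ALL)
 *      // filter, so iemMemCommitAndUnmap is skipped and memory is left untouched.
 * @endcode
 * With FCW.IM set (the exception masked), the IE bit is filtered out and the
 * store is committed as usual. */
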
10753/** Calculate the effective address from R/M. */
10754#ifndef IEM_WITH_SETJMP
10755# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10756 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10757#else
10758# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10759 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10760#endif
10761
10762#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10763#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10764#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10765#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10766#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10767#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10768#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10769
10770/**
10771 * Defers the rest of the instruction emulation to a C implementation routine
10772 * and returns, only taking the standard parameters.
10773 *
10774 * @param a_pfnCImpl The pointer to the C routine.
10775 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10776 */
10777#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10778
10779/**
10780 * Defers the rest of instruction emulation to a C implementation routine and
10781 * returns, taking one argument in addition to the standard ones.
10782 *
10783 * @param a_pfnCImpl The pointer to the C routine.
10784 * @param a0 The argument.
10785 */
10786#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10787
10788/**
10789 * Defers the rest of the instruction emulation to a C implementation routine
10790 * and returns, taking two arguments in addition to the standard ones.
10791 *
10792 * @param a_pfnCImpl The pointer to the C routine.
10793 * @param a0 The first extra argument.
10794 * @param a1 The second extra argument.
10795 */
10796#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10797
10798/**
10799 * Defers the rest of the instruction emulation to a C implementation routine
10800 * and returns, taking three arguments in addition to the standard ones.
10801 *
10802 * @param a_pfnCImpl The pointer to the C routine.
10803 * @param a0 The first extra argument.
10804 * @param a1 The second extra argument.
10805 * @param a2 The third extra argument.
10806 */
10807#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
10808
10809/**
10810 * Defers the rest of the instruction emulation to a C implementation routine
10811 * and returns, taking four arguments in addition to the standard ones.
10812 *
10813 * @param a_pfnCImpl The pointer to the C routine.
10814 * @param a0 The first extra argument.
10815 * @param a1 The second extra argument.
10816 * @param a2 The third extra argument.
10817 * @param a3 The fourth extra argument.
10818 */
10819#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
10820
10821/**
10822 * Defers the rest of the instruction emulation to a C implementation routine
10823 * and returns, taking five arguments in addition to the standard ones.
10824 *
10825 * @param a_pfnCImpl The pointer to the C routine.
10826 * @param a0 The first extra argument.
10827 * @param a1 The second extra argument.
10828 * @param a2 The third extra argument.
10829 * @param a3 The fourth extra argument.
10830 * @param a4 The fifth extra argument.
10831 */
10832#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
10833
10834/**
10835 * Defers the entire instruction emulation to a C implementation routine and
10836 * returns, only taking the standard parameters.
10837 *
10838 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10839 *
10840 * @param a_pfnCImpl The pointer to the C routine.
10841 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10842 */
10843#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10844
10845/**
10846 * Defers the entire instruction emulation to a C implementation routine and
10847 * returns, taking one argument in addition to the standard ones.
10848 *
10849 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10850 *
10851 * @param a_pfnCImpl The pointer to the C routine.
10852 * @param a0 The argument.
10853 */
10854#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10855
10856/**
10857 * Defers the entire instruction emulation to a C implementation routine and
10858 * returns, taking two arguments in addition to the standard ones.
10859 *
10860 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10861 *
10862 * @param a_pfnCImpl The pointer to the C routine.
10863 * @param a0 The first extra argument.
10864 * @param a1 The second extra argument.
10865 */
10866#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10867
10868/**
10869 * Defers the entire instruction emulation to a C implementation routine and
10870 * returns, taking three arguments in addition to the standard ones.
10871 *
10872 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10873 *
10874 * @param a_pfnCImpl The pointer to the C routine.
10875 * @param a0 The first extra argument.
10876 * @param a1 The second extra argument.
10877 * @param a2 The third extra argument.
10878 */
10879#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
10880
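/* A hypothetical decoder body deferring the whole instruction to a C worker
 * (iemCImpl_SomeWorker is a made-up name; FNIEMOP_DEF and the IEMOP_* helpers
 * are defined elsewhere in this file):
 * @code
 *  FNIEMOP_DEF(iemOp_some_insn)
 *  {
 *      IEMOP_MNEMONIC("someinsn");
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
 *  }
 * @endcode
 * The IEM_MC_CALL_CIMPL_* variants are used instead when the call is made from
 * within an IEM_MC_BEGIN/IEM_MC_END block after some decoding work. */
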
10881/**
10882 * Calls a FPU assembly implementation taking one visible argument.
10883 *
10884 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10885 * @param a0 The first extra argument.
10886 */
10887#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
10888 do { \
10889 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
10890 } while (0)
10891
10892/**
10893 * Calls a FPU assembly implementation taking two visible arguments.
10894 *
10895 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10896 * @param a0 The first extra argument.
10897 * @param a1 The second extra argument.
10898 */
10899#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
10900 do { \
10901 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
10902 } while (0)
10903
10904/**
10905 * Calls a FPU assembly implementation taking three visible arguments.
10906 *
10907 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10908 * @param a0 The first extra argument.
10909 * @param a1 The second extra argument.
10910 * @param a2 The third extra argument.
10911 */
10912#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
10913 do { \
10914 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
10915 } while (0)
10916
10917#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
10918 do { \
10919 (a_FpuData).FSW = (a_FSW); \
10920 (a_FpuData).r80Result = *(a_pr80Value); \
10921 } while (0)
10922
10923/** Pushes FPU result onto the stack. */
10924#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
10925 iemFpuPushResult(pVCpu, &a_FpuData)
10926/** Pushes FPU result onto the stack and sets the FPUDP. */
10927#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
10928 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
10929
10930/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
10931#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
10932 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
10933
10934/** Stores FPU result in a stack register. */
10935#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
10936 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
10937/** Stores FPU result in a stack register and pops the stack. */
10938#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
10939 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
10940/** Stores FPU result in a stack register and sets the FPUDP. */
10941#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
10942 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
10943/** Stores FPU result in a stack register, sets the FPUDP, and pops the
10944 * stack. */
10945#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
10946 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
10947
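/* Rough sketch of how a result produced by an FPU AIMPL call ends up in a
 * stack register (hypothetical names: FpuRes is an IEMFPURESULT local and
 * iemAImpl_SomeUnaryOp_r80 a made-up assembly worker):
 * @code
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_SomeUnaryOp_r80, &FpuRes, pr80Value);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 * @endcode
 * The *_THEN_POP and *_MEM_OP variants additionally pop the stack and record
 * the data pointer (FPUDP/FPUDS), respectively. */
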
10948/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
10949#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
10950 iemFpuUpdateOpcodeAndIp(pVCpu)
10951/** Free a stack register (for FFREE and FFREEP). */
10952#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
10953 iemFpuStackFree(pVCpu, a_iStReg)
10954/** Increment the FPU stack pointer. */
10955#define IEM_MC_FPU_STACK_INC_TOP() \
10956 iemFpuStackIncTop(pVCpu)
10957/** Decrement the FPU stack pointer. */
10958#define IEM_MC_FPU_STACK_DEC_TOP() \
10959 iemFpuStackDecTop(pVCpu)
10960
10961/** Updates the FSW, FOP, FPUIP, and FPUCS. */
10962#define IEM_MC_UPDATE_FSW(a_u16FSW) \
10963 iemFpuUpdateFSW(pVCpu, a_u16FSW)
10964/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
10965#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
10966 iemFpuUpdateFSW(pVCpu, a_u16FSW)
10967/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
10968#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
10969 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
10970/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
10971#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
10972 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
10973/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
10974 * stack. */
10975#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
10976 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
10977/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
10978#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
10979 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
10980
10981/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
10982#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
10983 iemFpuStackUnderflow(pVCpu, a_iStDst)
10984/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
10985 * stack. */
10986#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
10987 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
10988/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
10989 * FPUDS. */
10990#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
10991 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
10992/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
10993 * FPUDS. Pops stack. */
10994#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
10995 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
10996/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
10997 * stack twice. */
10998#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
10999 iemFpuStackUnderflowThenPopPop(pVCpu)
11000/** Raises a FPU stack underflow exception for an instruction pushing a result
11001 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11002#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11003 iemFpuStackPushUnderflow(pVCpu)
11004/** Raises a FPU stack underflow exception for an instruction pushing a result
11005 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11006#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11007 iemFpuStackPushUnderflowTwo(pVCpu)
11008
11009/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11010 * FPUIP, FPUCS and FOP. */
11011#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11012 iemFpuStackPushOverflow(pVCpu)
11013/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11014 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11015#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11016 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11017/** Prepares for using the FPU state.
11018 * Ensures that we can use the host FPU in the current context (RC+R0).
11019 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11020#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11021/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11022#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11023/** Actualizes the guest FPU state so it can be accessed and modified. */
11024#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11025
11026/** Prepares for using the SSE state.
11027 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11028 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11029#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11030/** Actualizes the guest XMM0..15 register state for read-only access. */
11031#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11032/** Actualizes the guest XMM0..15 register state for read-write access. */
11033#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11034
11035/**
11036 * Calls a MMX assembly implementation taking two visible arguments.
11037 *
11038 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11039 * @param a0 The first extra argument.
11040 * @param a1 The second extra argument.
11041 */
11042#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11043 do { \
11044 IEM_MC_PREPARE_FPU_USAGE(); \
11045 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11046 } while (0)
11047
11048/**
11049 * Calls a MMX assembly implementation taking three visible arguments.
11050 *
11051 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11052 * @param a0 The first extra argument.
11053 * @param a1 The second extra argument.
11054 * @param a2 The third extra argument.
11055 */
11056#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11057 do { \
11058 IEM_MC_PREPARE_FPU_USAGE(); \
11059 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11060 } while (0)
11061
11062
11063/**
11064 * Calls a SSE assembly implementation taking two visible arguments.
11065 *
11066 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11067 * @param a0 The first extra argument.
11068 * @param a1 The second extra argument.
11069 */
11070#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11071 do { \
11072 IEM_MC_PREPARE_SSE_USAGE(); \
11073 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11074 } while (0)
11075
11076/**
11077 * Calls a SSE assembly implementation taking three visible arguments.
11078 *
11079 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11080 * @param a0 The first extra argument.
11081 * @param a1 The second extra argument.
11082 * @param a2 The third extra argument.
11083 */
11084#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11085 do { \
11086 IEM_MC_PREPARE_SSE_USAGE(); \
11087 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11088 } while (0)
11089
11090/** @note Not for IOPL or IF testing. */
11091#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11092/** @note Not for IOPL or IF testing. */
11093#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11094/** @note Not for IOPL or IF testing. */
11095#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11096/** @note Not for IOPL or IF testing. */
11097#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11098/** @note Not for IOPL or IF testing. */
11099#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11100 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11101 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11102/** @note Not for IOPL or IF testing. */
11103#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11104 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11105 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11106/** @note Not for IOPL or IF testing. */
11107#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11108 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11109 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11110 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11111/** @note Not for IOPL or IF testing. */
11112#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11113 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11114 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11115 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11116#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11117#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11118#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11119/** @note Not for IOPL or IF testing. */
11120#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11121 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11122 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11123/** @note Not for IOPL or IF testing. */
11124#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11125 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11126 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11127/** @note Not for IOPL or IF testing. */
11128#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11129 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11130 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11131/** @note Not for IOPL or IF testing. */
11132#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11133 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11134 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11135/** @note Not for IOPL or IF testing. */
11136#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11137 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11138 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11139/** @note Not for IOPL or IF testing. */
11140#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11141 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11142 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11143#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11144#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11145
11146#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11147 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11148#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11149 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11150#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11151 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11152#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11153 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11154#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11155 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11156#define IEM_MC_IF_FCW_IM() \
11157 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11158
11159#define IEM_MC_ELSE() } else {
11160#define IEM_MC_ENDIF() } do {} while (0)
11161
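/* The IEM_MC_IF_* macros open a plain C block which IEM_MC_ELSE and
 * IEM_MC_ENDIF continue and close, so every IF must be matched by an ENDIF.
 * A small, hypothetical conditional sketch (IEM_MC_STORE_GREG_U16 and
 * IEM_MC_ADVANCE_RIP are defined elsewhere in this file):
 * @code
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_STORE_GREG_U16(iGReg, u16Value);
 *      IEM_MC_ELSE()
 *          IEM_MC_STORE_GREG_U16(iGReg, 0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 * @endcode
 */
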
11162/** @} */
11163
11164
11165/** @name Opcode Debug Helpers.
11166 * @{
11167 */
11168#ifdef DEBUG
11169# define IEMOP_MNEMONIC(a_szMnemonic) \
11170 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11171 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions))
11172# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
11173 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11174 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions))
11175#else
11176# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
11177# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
11178#endif
11179
11180/** @} */
11181
11182
11183/** @name Opcode Helpers.
11184 * @{
11185 */
11186
11187#ifdef IN_RING3
11188# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11189 do { \
11190 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11191 else \
11192 { \
11193 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11194 return IEMOP_RAISE_INVALID_OPCODE(); \
11195 } \
11196 } while (0)
11197#else
11198# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11199 do { \
11200 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11201 else return IEMOP_RAISE_INVALID_OPCODE(); \
11202 } while (0)
11203#endif
11204
11205/** The instruction requires a 186 or later. */
11206#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11207# define IEMOP_HLP_MIN_186() do { } while (0)
11208#else
11209# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11210#endif
11211
11212/** The instruction requires a 286 or later. */
11213#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11214# define IEMOP_HLP_MIN_286() do { } while (0)
11215#else
11216# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11217#endif
11218
11219/** The instruction requires a 386 or later. */
11220#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11221# define IEMOP_HLP_MIN_386() do { } while (0)
11222#else
11223# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11224#endif
11225
11226/** The instruction requires a 386 or later if the given expression is true. */
11227#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11228# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11229#else
11230# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11231#endif
11232
11233/** The instruction requires a 486 or later. */
11234#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11235# define IEMOP_HLP_MIN_486() do { } while (0)
11236#else
11237# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11238#endif
11239
11240/** The instruction requires a Pentium (586) or later. */
11241#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
11242# define IEMOP_HLP_MIN_586() do { } while (0)
11243#else
11244# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
11245#endif
11246
11247/** The instruction requires a PentiumPro (686) or later. */
11248#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
11249# define IEMOP_HLP_MIN_686() do { } while (0)
11250#else
11251# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
11252#endif
11253
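/* Typical use: reject the instruction with \#UD on CPU generations that do not
 * implement it, before doing any further decoding (hypothetical opcode name):
 * @code
 *  FNIEMOP_DEF(iemOp_some_486_insn)
 *  {
 *      IEMOP_HLP_MIN_486();
 *      IEMOP_MNEMONIC("some486insn");
 *      // ... rest of the decoder ...
 *  }
 * @endcode
 */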
11254
11255/** The instruction raises an \#UD in real and V8086 mode. */
11256#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11257 do \
11258 { \
11259 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11260 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11261 } while (0)
11262
11263/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11264 * 64-bit mode. */
11265#define IEMOP_HLP_NO_64BIT() \
11266 do \
11267 { \
11268 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11269 return IEMOP_RAISE_INVALID_OPCODE(); \
11270 } while (0)
11271
11272/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11273 * 64-bit mode. */
11274#define IEMOP_HLP_ONLY_64BIT() \
11275 do \
11276 { \
11277 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11278 return IEMOP_RAISE_INVALID_OPCODE(); \
11279 } while (0)
11280
11281/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11282#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11283 do \
11284 { \
11285 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11286 iemRecalEffOpSize64Default(pVCpu); \
11287 } while (0)
11288
11289/** The instruction has 64-bit operand size if 64-bit mode. */
11290#define IEMOP_HLP_64BIT_OP_SIZE() \
11291 do \
11292 { \
11293 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11294 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11295 } while (0)
11296
11297/** Only a REX prefix immediately preceding the first opcode byte takes
11298 * effect. This macro helps ensure this as well as logging bad guest code. */
11299#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11300 do \
11301 { \
11302 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11303 { \
11304 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11305 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11306 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11307 pVCpu->iem.s.uRexB = 0; \
11308 pVCpu->iem.s.uRexIndex = 0; \
11309 pVCpu->iem.s.uRexReg = 0; \
11310 iemRecalEffOpSize(pVCpu); \
11311 } \
11312 } while (0)
11313
11314/**
11315 * Done decoding.
11316 */
11317#define IEMOP_HLP_DONE_DECODING() \
11318 do \
11319 { \
11320 /*nothing for now, maybe later... */ \
11321 } while (0)
11322
11323/**
11324 * Done decoding, raise \#UD exception if lock prefix present.
11325 */
11326#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11327 do \
11328 { \
11329 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11330 { /* likely */ } \
11331 else \
11332 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11333 } while (0)
11334#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11335 do \
11336 { \
11337 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11338 { /* likely */ } \
11339 else \
11340 { \
11341 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11342 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11343 } \
11344 } while (0)
11345#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11346 do \
11347 { \
11348 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11349 { /* likely */ } \
11350 else \
11351 { \
11352 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11353 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11354 } \
11355 } while (0)
11356
11357/**
11358 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11359 * are present.
11360 */
11361#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11362 do \
11363 { \
11364 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11365 { /* likely */ } \
11366 else \
11367 return IEMOP_RAISE_INVALID_OPCODE(); \
11368 } while (0)
11369
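/* The DONE_DECODING checks sit at the point where all opcode, ModR/M and
 * immediate bytes have been fetched.  A typical register-form path raises
 * \#UD for a LOCK prefix this way, since LOCK is only architecturally valid
 * with a memory destination (hypothetical sketch):
 * @code
 *      if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          // ... register, register operation ...
 *      }
 * @endcode
 */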
11370
11371/**
11372 * Calculates the effective address of a ModR/M memory operand.
11373 *
11374 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11375 *
11376 * @return Strict VBox status code.
11377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11378 * @param bRm The ModRM byte.
11379 * @param cbImm The size of any immediate following the
11380 * effective address opcode bytes. Important for
11381 * RIP relative addressing.
11382 * @param pGCPtrEff Where to return the effective address.
11383 */
11384IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11385{
11386 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11387 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11388# define SET_SS_DEF() \
11389 do \
11390 { \
11391 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11392 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11393 } while (0)
11394
11395 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11396 {
11397/** @todo Check the effective address size crap! */
11398 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11399 {
11400 uint16_t u16EffAddr;
11401
11402 /* Handle the disp16 form with no registers first. */
11403 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11404 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11405 else
11406 {
11407 /* Get the displacement. */
11408 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11409 {
11410 case 0: u16EffAddr = 0; break;
11411 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11412 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11413 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11414 }
11415
11416 /* Add the base and index registers to the disp. */
11417 switch (bRm & X86_MODRM_RM_MASK)
11418 {
11419 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11420 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11421 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11422 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11423 case 4: u16EffAddr += pCtx->si; break;
11424 case 5: u16EffAddr += pCtx->di; break;
11425 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11426 case 7: u16EffAddr += pCtx->bx; break;
11427 }
11428 }
11429
11430 *pGCPtrEff = u16EffAddr;
11431 }
11432 else
11433 {
11434 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11435 uint32_t u32EffAddr;
11436
11437 /* Handle the disp32 form with no registers first. */
11438 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11439 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11440 else
11441 {
11442 /* Get the register (or SIB) value. */
11443 switch ((bRm & X86_MODRM_RM_MASK))
11444 {
11445 case 0: u32EffAddr = pCtx->eax; break;
11446 case 1: u32EffAddr = pCtx->ecx; break;
11447 case 2: u32EffAddr = pCtx->edx; break;
11448 case 3: u32EffAddr = pCtx->ebx; break;
11449 case 4: /* SIB */
11450 {
11451 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11452
11453 /* Get the index and scale it. */
11454 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11455 {
11456 case 0: u32EffAddr = pCtx->eax; break;
11457 case 1: u32EffAddr = pCtx->ecx; break;
11458 case 2: u32EffAddr = pCtx->edx; break;
11459 case 3: u32EffAddr = pCtx->ebx; break;
11460 case 4: u32EffAddr = 0; /*none */ break;
11461 case 5: u32EffAddr = pCtx->ebp; break;
11462 case 6: u32EffAddr = pCtx->esi; break;
11463 case 7: u32EffAddr = pCtx->edi; break;
11464 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11465 }
11466 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11467
11468 /* add base */
11469 switch (bSib & X86_SIB_BASE_MASK)
11470 {
11471 case 0: u32EffAddr += pCtx->eax; break;
11472 case 1: u32EffAddr += pCtx->ecx; break;
11473 case 2: u32EffAddr += pCtx->edx; break;
11474 case 3: u32EffAddr += pCtx->ebx; break;
11475 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11476 case 5:
11477 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11478 {
11479 u32EffAddr += pCtx->ebp;
11480 SET_SS_DEF();
11481 }
11482 else
11483 {
11484 uint32_t u32Disp;
11485 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11486 u32EffAddr += u32Disp;
11487 }
11488 break;
11489 case 6: u32EffAddr += pCtx->esi; break;
11490 case 7: u32EffAddr += pCtx->edi; break;
11491 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11492 }
11493 break;
11494 }
11495 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11496 case 6: u32EffAddr = pCtx->esi; break;
11497 case 7: u32EffAddr = pCtx->edi; break;
11498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11499 }
11500
11501 /* Get and add the displacement. */
11502 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11503 {
11504 case 0:
11505 break;
11506 case 1:
11507 {
11508 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11509 u32EffAddr += i8Disp;
11510 break;
11511 }
11512 case 2:
11513 {
11514 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11515 u32EffAddr += u32Disp;
11516 break;
11517 }
11518 default:
11519 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11520 }
11521
11522 }
11523 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11524 *pGCPtrEff = u32EffAddr;
11525 else
11526 {
11527 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11528 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11529 }
11530 }
11531 }
11532 else
11533 {
11534 uint64_t u64EffAddr;
11535
11536 /* Handle the rip+disp32 form with no registers first. */
11537 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11538 {
11539 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11540 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11541 }
11542 else
11543 {
11544 /* Get the register (or SIB) value. */
11545 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11546 {
11547 case 0: u64EffAddr = pCtx->rax; break;
11548 case 1: u64EffAddr = pCtx->rcx; break;
11549 case 2: u64EffAddr = pCtx->rdx; break;
11550 case 3: u64EffAddr = pCtx->rbx; break;
11551 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11552 case 6: u64EffAddr = pCtx->rsi; break;
11553 case 7: u64EffAddr = pCtx->rdi; break;
11554 case 8: u64EffAddr = pCtx->r8; break;
11555 case 9: u64EffAddr = pCtx->r9; break;
11556 case 10: u64EffAddr = pCtx->r10; break;
11557 case 11: u64EffAddr = pCtx->r11; break;
11558 case 13: u64EffAddr = pCtx->r13; break;
11559 case 14: u64EffAddr = pCtx->r14; break;
11560 case 15: u64EffAddr = pCtx->r15; break;
11561 /* SIB */
11562 case 4:
11563 case 12:
11564 {
11565 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11566
11567 /* Get the index and scale it. */
11568 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11569 {
11570 case 0: u64EffAddr = pCtx->rax; break;
11571 case 1: u64EffAddr = pCtx->rcx; break;
11572 case 2: u64EffAddr = pCtx->rdx; break;
11573 case 3: u64EffAddr = pCtx->rbx; break;
11574 case 4: u64EffAddr = 0; /*none */ break;
11575 case 5: u64EffAddr = pCtx->rbp; break;
11576 case 6: u64EffAddr = pCtx->rsi; break;
11577 case 7: u64EffAddr = pCtx->rdi; break;
11578 case 8: u64EffAddr = pCtx->r8; break;
11579 case 9: u64EffAddr = pCtx->r9; break;
11580 case 10: u64EffAddr = pCtx->r10; break;
11581 case 11: u64EffAddr = pCtx->r11; break;
11582 case 12: u64EffAddr = pCtx->r12; break;
11583 case 13: u64EffAddr = pCtx->r13; break;
11584 case 14: u64EffAddr = pCtx->r14; break;
11585 case 15: u64EffAddr = pCtx->r15; break;
11586 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11587 }
11588 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11589
11590 /* add base */
11591 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11592 {
11593 case 0: u64EffAddr += pCtx->rax; break;
11594 case 1: u64EffAddr += pCtx->rcx; break;
11595 case 2: u64EffAddr += pCtx->rdx; break;
11596 case 3: u64EffAddr += pCtx->rbx; break;
11597 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11598 case 6: u64EffAddr += pCtx->rsi; break;
11599 case 7: u64EffAddr += pCtx->rdi; break;
11600 case 8: u64EffAddr += pCtx->r8; break;
11601 case 9: u64EffAddr += pCtx->r9; break;
11602 case 10: u64EffAddr += pCtx->r10; break;
11603 case 11: u64EffAddr += pCtx->r11; break;
11604 case 12: u64EffAddr += pCtx->r12; break;
11605 case 14: u64EffAddr += pCtx->r14; break;
11606 case 15: u64EffAddr += pCtx->r15; break;
11607 /* complicated encodings */
11608 case 5:
11609 case 13:
11610 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11611 {
11612 if (!pVCpu->iem.s.uRexB)
11613 {
11614 u64EffAddr += pCtx->rbp;
11615 SET_SS_DEF();
11616 }
11617 else
11618 u64EffAddr += pCtx->r13;
11619 }
11620 else
11621 {
11622 uint32_t u32Disp;
11623 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11624 u64EffAddr += (int32_t)u32Disp;
11625 }
11626 break;
11627 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11628 }
11629 break;
11630 }
11631 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11632 }
11633
11634 /* Get and add the displacement. */
11635 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11636 {
11637 case 0:
11638 break;
11639 case 1:
11640 {
11641 int8_t i8Disp;
11642 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11643 u64EffAddr += i8Disp;
11644 break;
11645 }
11646 case 2:
11647 {
11648 uint32_t u32Disp;
11649 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11650 u64EffAddr += (int32_t)u32Disp;
11651 break;
11652 }
11653 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11654 }
11655
11656 }
11657
11658 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11659 *pGCPtrEff = u64EffAddr;
11660 else
11661 {
11662 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11663 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11664 }
11665 }
11666
11667 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11668 return VINF_SUCCESS;
11669}
11670
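/* Worked examples for the decoding above (values picked for illustration):
 * @code
 *      // 16-bit addressing, bRm=0x42 (mod=1, rm=2), disp8=0x10:
 *      //      GCPtrEff = BP + SI + 0x10, and SS becomes the default segment.
 *      // 64-bit mode, bRm=0x05 (mod=0, rm=5), i.e. RIP-relative:
 *      //      GCPtrEff = RIP + IEM_GET_INSTR_LEN(pVCpu) + cbImm + (int32_t)disp32
 * @endcode
 */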
11671
11672/**
11673 * Calculates the effective address of a ModR/M memory operand.
11674 *
11675 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11676 *
11677 * @return Strict VBox status code.
11678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11679 * @param bRm The ModRM byte.
11680 * @param cbImm The size of any immediate following the
11681 * effective address opcode bytes. Important for
11682 * RIP relative addressing.
11683 * @param pGCPtrEff Where to return the effective address.
11684 * @param offRsp RSP displacement.
11685 */
11686IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11687{
11688 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11689 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11690# define SET_SS_DEF() \
11691 do \
11692 { \
11693 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11694 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11695 } while (0)
11696
11697 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11698 {
11699/** @todo Check the effective address size crap! */
11700 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11701 {
11702 uint16_t u16EffAddr;
11703
11704 /* Handle the disp16 form with no registers first. */
11705 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11706 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11707 else
11708 {
11709 /* Get the displacement. */
11710 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11711 {
11712 case 0: u16EffAddr = 0; break;
11713 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11714 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11715 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11716 }
11717
11718 /* Add the base and index registers to the disp. */
11719 switch (bRm & X86_MODRM_RM_MASK)
11720 {
11721 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11722 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11723 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11724 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11725 case 4: u16EffAddr += pCtx->si; break;
11726 case 5: u16EffAddr += pCtx->di; break;
11727 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11728 case 7: u16EffAddr += pCtx->bx; break;
11729 }
11730 }
11731
11732 *pGCPtrEff = u16EffAddr;
11733 }
11734 else
11735 {
11736 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11737 uint32_t u32EffAddr;
11738
11739 /* Handle the disp32 form with no registers first. */
11740 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11741 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11742 else
11743 {
11744 /* Get the register (or SIB) value. */
11745 switch ((bRm & X86_MODRM_RM_MASK))
11746 {
11747 case 0: u32EffAddr = pCtx->eax; break;
11748 case 1: u32EffAddr = pCtx->ecx; break;
11749 case 2: u32EffAddr = pCtx->edx; break;
11750 case 3: u32EffAddr = pCtx->ebx; break;
11751 case 4: /* SIB */
11752 {
11753 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11754
11755 /* Get the index and scale it. */
11756 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11757 {
11758 case 0: u32EffAddr = pCtx->eax; break;
11759 case 1: u32EffAddr = pCtx->ecx; break;
11760 case 2: u32EffAddr = pCtx->edx; break;
11761 case 3: u32EffAddr = pCtx->ebx; break;
11762 case 4: u32EffAddr = 0; /*none */ break;
11763 case 5: u32EffAddr = pCtx->ebp; break;
11764 case 6: u32EffAddr = pCtx->esi; break;
11765 case 7: u32EffAddr = pCtx->edi; break;
11766 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11767 }
11768 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11769
11770 /* add base */
11771 switch (bSib & X86_SIB_BASE_MASK)
11772 {
11773 case 0: u32EffAddr += pCtx->eax; break;
11774 case 1: u32EffAddr += pCtx->ecx; break;
11775 case 2: u32EffAddr += pCtx->edx; break;
11776 case 3: u32EffAddr += pCtx->ebx; break;
11777 case 4:
11778 u32EffAddr += pCtx->esp + offRsp;
11779 SET_SS_DEF();
11780 break;
11781 case 5:
11782 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11783 {
11784 u32EffAddr += pCtx->ebp;
11785 SET_SS_DEF();
11786 }
11787 else
11788 {
11789 uint32_t u32Disp;
11790 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11791 u32EffAddr += u32Disp;
11792 }
11793 break;
11794 case 6: u32EffAddr += pCtx->esi; break;
11795 case 7: u32EffAddr += pCtx->edi; break;
11796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11797 }
11798 break;
11799 }
11800 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11801 case 6: u32EffAddr = pCtx->esi; break;
11802 case 7: u32EffAddr = pCtx->edi; break;
11803 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11804 }
11805
11806 /* Get and add the displacement. */
11807 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11808 {
11809 case 0:
11810 break;
11811 case 1:
11812 {
11813 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11814 u32EffAddr += i8Disp;
11815 break;
11816 }
11817 case 2:
11818 {
11819 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11820 u32EffAddr += u32Disp;
11821 break;
11822 }
11823 default:
11824 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11825 }
11826
11827 }
11828 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11829 *pGCPtrEff = u32EffAddr;
11830 else
11831 {
11832 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11833 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11834 }
11835 }
11836 }
11837 else
11838 {
11839 uint64_t u64EffAddr;
11840
11841 /* Handle the rip+disp32 form with no registers first. */
11842 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11843 {
11844 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11845 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11846 }
11847 else
11848 {
11849 /* Get the register (or SIB) value. */
11850 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11851 {
11852 case 0: u64EffAddr = pCtx->rax; break;
11853 case 1: u64EffAddr = pCtx->rcx; break;
11854 case 2: u64EffAddr = pCtx->rdx; break;
11855 case 3: u64EffAddr = pCtx->rbx; break;
11856 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11857 case 6: u64EffAddr = pCtx->rsi; break;
11858 case 7: u64EffAddr = pCtx->rdi; break;
11859 case 8: u64EffAddr = pCtx->r8; break;
11860 case 9: u64EffAddr = pCtx->r9; break;
11861 case 10: u64EffAddr = pCtx->r10; break;
11862 case 11: u64EffAddr = pCtx->r11; break;
11863 case 13: u64EffAddr = pCtx->r13; break;
11864 case 14: u64EffAddr = pCtx->r14; break;
11865 case 15: u64EffAddr = pCtx->r15; break;
11866 /* SIB */
11867 case 4:
11868 case 12:
11869 {
11870 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11871
11872 /* Get the index and scale it. */
11873 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11874 {
11875 case 0: u64EffAddr = pCtx->rax; break;
11876 case 1: u64EffAddr = pCtx->rcx; break;
11877 case 2: u64EffAddr = pCtx->rdx; break;
11878 case 3: u64EffAddr = pCtx->rbx; break;
11879 case 4: u64EffAddr = 0; /*none */ break;
11880 case 5: u64EffAddr = pCtx->rbp; break;
11881 case 6: u64EffAddr = pCtx->rsi; break;
11882 case 7: u64EffAddr = pCtx->rdi; break;
11883 case 8: u64EffAddr = pCtx->r8; break;
11884 case 9: u64EffAddr = pCtx->r9; break;
11885 case 10: u64EffAddr = pCtx->r10; break;
11886 case 11: u64EffAddr = pCtx->r11; break;
11887 case 12: u64EffAddr = pCtx->r12; break;
11888 case 13: u64EffAddr = pCtx->r13; break;
11889 case 14: u64EffAddr = pCtx->r14; break;
11890 case 15: u64EffAddr = pCtx->r15; break;
11891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11892 }
11893 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11894
11895 /* add base */
11896 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11897 {
11898 case 0: u64EffAddr += pCtx->rax; break;
11899 case 1: u64EffAddr += pCtx->rcx; break;
11900 case 2: u64EffAddr += pCtx->rdx; break;
11901 case 3: u64EffAddr += pCtx->rbx; break;
11902 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
11903 case 6: u64EffAddr += pCtx->rsi; break;
11904 case 7: u64EffAddr += pCtx->rdi; break;
11905 case 8: u64EffAddr += pCtx->r8; break;
11906 case 9: u64EffAddr += pCtx->r9; break;
11907 case 10: u64EffAddr += pCtx->r10; break;
11908 case 11: u64EffAddr += pCtx->r11; break;
11909 case 12: u64EffAddr += pCtx->r12; break;
11910 case 14: u64EffAddr += pCtx->r14; break;
11911 case 15: u64EffAddr += pCtx->r15; break;
11912 /* complicated encodings */
11913 case 5:
11914 case 13:
11915 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11916 {
11917 if (!pVCpu->iem.s.uRexB)
11918 {
11919 u64EffAddr += pCtx->rbp;
11920 SET_SS_DEF();
11921 }
11922 else
11923 u64EffAddr += pCtx->r13;
11924 }
11925 else
11926 {
11927 uint32_t u32Disp;
11928 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11929 u64EffAddr += (int32_t)u32Disp;
11930 }
11931 break;
11932 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11933 }
11934 break;
11935 }
11936 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11937 }
11938
11939 /* Get and add the displacement. */
11940 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11941 {
11942 case 0:
11943 break;
11944 case 1:
11945 {
11946 int8_t i8Disp;
11947 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11948 u64EffAddr += i8Disp;
11949 break;
11950 }
11951 case 2:
11952 {
11953 uint32_t u32Disp;
11954 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11955 u64EffAddr += (int32_t)u32Disp;
11956 break;
11957 }
11958 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11959 }
11960
11961 }
11962
11963 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11964 *pGCPtrEff = u64EffAddr;
11965 else
11966 {
11967 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11968 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11969 }
11970 }
11971
11972 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
11973 return VINF_SUCCESS;
11974}
11975
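/* The only difference from iemOpHlpCalcRmEffAddr is the @a offRsp adjustment
 * applied when xSP is the SIB base register (base 4), e.g.:
 * @code
 *      // SIB base=4 (xSP), offRsp=-2:  GCPtrEff = RSP - 2 + scaled index + disp
 * @endcode
 */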
11976
11977#ifdef IEM_WITH_SETJMP
11978/**
11979 * Calculates the effective address of a ModR/M memory operand.
11980 *
11981 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11982 *
11983 * May longjmp on internal error.
11984 *
11985 * @return The effective address.
11986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11987 * @param bRm The ModRM byte.
11988 * @param cbImm The size of any immediate following the
11989 * effective address opcode bytes. Important for
11990 * RIP relative addressing.
11991 */
11992IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
11993{
11994 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
11995 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11996# define SET_SS_DEF() \
11997 do \
11998 { \
11999 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12000 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12001 } while (0)
12002
12003 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12004 {
12005/** @todo Check the effective address size crap! */
12006 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12007 {
12008 uint16_t u16EffAddr;
12009
12010 /* Handle the disp16 form with no registers first. */
12011 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12012 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12013 else
12014 {
12015 /* Get the displacement. */
12016 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12017 {
12018 case 0: u16EffAddr = 0; break;
12019 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12020 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12021 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12022 }
12023
12024 /* Add the base and index registers to the disp. */
12025 switch (bRm & X86_MODRM_RM_MASK)
12026 {
12027 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12028 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12029 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12030 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12031 case 4: u16EffAddr += pCtx->si; break;
12032 case 5: u16EffAddr += pCtx->di; break;
12033 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12034 case 7: u16EffAddr += pCtx->bx; break;
12035 }
12036 }
12037
12038 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12039 return u16EffAddr;
12040 }
12041
12042 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12043 uint32_t u32EffAddr;
12044
12045 /* Handle the disp32 form with no registers first. */
12046 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12047 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12048 else
12049 {
12050 /* Get the register (or SIB) value. */
12051 switch ((bRm & X86_MODRM_RM_MASK))
12052 {
12053 case 0: u32EffAddr = pCtx->eax; break;
12054 case 1: u32EffAddr = pCtx->ecx; break;
12055 case 2: u32EffAddr = pCtx->edx; break;
12056 case 3: u32EffAddr = pCtx->ebx; break;
12057 case 4: /* SIB */
12058 {
12059 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12060
12061 /* Get the index and scale it. */
12062 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12063 {
12064 case 0: u32EffAddr = pCtx->eax; break;
12065 case 1: u32EffAddr = pCtx->ecx; break;
12066 case 2: u32EffAddr = pCtx->edx; break;
12067 case 3: u32EffAddr = pCtx->ebx; break;
12068 case 4: u32EffAddr = 0; /*none */ break;
12069 case 5: u32EffAddr = pCtx->ebp; break;
12070 case 6: u32EffAddr = pCtx->esi; break;
12071 case 7: u32EffAddr = pCtx->edi; break;
12072 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12073 }
12074 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12075
12076 /* add base */
12077 switch (bSib & X86_SIB_BASE_MASK)
12078 {
12079 case 0: u32EffAddr += pCtx->eax; break;
12080 case 1: u32EffAddr += pCtx->ecx; break;
12081 case 2: u32EffAddr += pCtx->edx; break;
12082 case 3: u32EffAddr += pCtx->ebx; break;
12083 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12084 case 5:
12085 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12086 {
12087 u32EffAddr += pCtx->ebp;
12088 SET_SS_DEF();
12089 }
12090 else
12091 {
12092 uint32_t u32Disp;
12093 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12094 u32EffAddr += u32Disp;
12095 }
12096 break;
12097 case 6: u32EffAddr += pCtx->esi; break;
12098 case 7: u32EffAddr += pCtx->edi; break;
12099 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12100 }
12101 break;
12102 }
12103 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12104 case 6: u32EffAddr = pCtx->esi; break;
12105 case 7: u32EffAddr = pCtx->edi; break;
12106 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12107 }
12108
12109 /* Get and add the displacement. */
12110 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12111 {
12112 case 0:
12113 break;
12114 case 1:
12115 {
12116 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12117 u32EffAddr += i8Disp;
12118 break;
12119 }
12120 case 2:
12121 {
12122 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12123 u32EffAddr += u32Disp;
12124 break;
12125 }
12126 default:
12127 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12128 }
12129 }
12130
12131 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12132 {
12133 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12134 return u32EffAddr;
12135 }
12136 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12137 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12138 return u32EffAddr & UINT16_MAX;
12139 }
12140
12141 uint64_t u64EffAddr;
12142
12143 /* Handle the rip+disp32 form with no registers first. */
12144 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12145 {
12146 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12147 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12148 }
12149 else
12150 {
12151 /* Get the register (or SIB) value. */
12152 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12153 {
12154 case 0: u64EffAddr = pCtx->rax; break;
12155 case 1: u64EffAddr = pCtx->rcx; break;
12156 case 2: u64EffAddr = pCtx->rdx; break;
12157 case 3: u64EffAddr = pCtx->rbx; break;
12158 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12159 case 6: u64EffAddr = pCtx->rsi; break;
12160 case 7: u64EffAddr = pCtx->rdi; break;
12161 case 8: u64EffAddr = pCtx->r8; break;
12162 case 9: u64EffAddr = pCtx->r9; break;
12163 case 10: u64EffAddr = pCtx->r10; break;
12164 case 11: u64EffAddr = pCtx->r11; break;
12165 case 13: u64EffAddr = pCtx->r13; break;
12166 case 14: u64EffAddr = pCtx->r14; break;
12167 case 15: u64EffAddr = pCtx->r15; break;
12168 /* SIB */
12169 case 4:
12170 case 12:
12171 {
12172 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12173
12174 /* Get the index and scale it. */
12175 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12176 {
12177 case 0: u64EffAddr = pCtx->rax; break;
12178 case 1: u64EffAddr = pCtx->rcx; break;
12179 case 2: u64EffAddr = pCtx->rdx; break;
12180 case 3: u64EffAddr = pCtx->rbx; break;
12181 case 4: u64EffAddr = 0; /*none */ break;
12182 case 5: u64EffAddr = pCtx->rbp; break;
12183 case 6: u64EffAddr = pCtx->rsi; break;
12184 case 7: u64EffAddr = pCtx->rdi; break;
12185 case 8: u64EffAddr = pCtx->r8; break;
12186 case 9: u64EffAddr = pCtx->r9; break;
12187 case 10: u64EffAddr = pCtx->r10; break;
12188 case 11: u64EffAddr = pCtx->r11; break;
12189 case 12: u64EffAddr = pCtx->r12; break;
12190 case 13: u64EffAddr = pCtx->r13; break;
12191 case 14: u64EffAddr = pCtx->r14; break;
12192 case 15: u64EffAddr = pCtx->r15; break;
12193 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12194 }
12195 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12196
12197 /* add base */
12198 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12199 {
12200 case 0: u64EffAddr += pCtx->rax; break;
12201 case 1: u64EffAddr += pCtx->rcx; break;
12202 case 2: u64EffAddr += pCtx->rdx; break;
12203 case 3: u64EffAddr += pCtx->rbx; break;
12204 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12205 case 6: u64EffAddr += pCtx->rsi; break;
12206 case 7: u64EffAddr += pCtx->rdi; break;
12207 case 8: u64EffAddr += pCtx->r8; break;
12208 case 9: u64EffAddr += pCtx->r9; break;
12209 case 10: u64EffAddr += pCtx->r10; break;
12210 case 11: u64EffAddr += pCtx->r11; break;
12211 case 12: u64EffAddr += pCtx->r12; break;
12212 case 14: u64EffAddr += pCtx->r14; break;
12213 case 15: u64EffAddr += pCtx->r15; break;
12214 /* complicated encodings */
12215 case 5:
12216 case 13:
12217 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12218 {
12219 if (!pVCpu->iem.s.uRexB)
12220 {
12221 u64EffAddr += pCtx->rbp;
12222 SET_SS_DEF();
12223 }
12224 else
12225 u64EffAddr += pCtx->r13;
12226 }
12227 else
12228 {
12229 uint32_t u32Disp;
12230 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12231 u64EffAddr += (int32_t)u32Disp;
12232 }
12233 break;
12234 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12235 }
12236 break;
12237 }
12238 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12239 }
12240
12241 /* Get and add the displacement. */
12242 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12243 {
12244 case 0:
12245 break;
12246 case 1:
12247 {
12248 int8_t i8Disp;
12249 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12250 u64EffAddr += i8Disp;
12251 break;
12252 }
12253 case 2:
12254 {
12255 uint32_t u32Disp;
12256 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12257 u64EffAddr += (int32_t)u32Disp;
12258 break;
12259 }
12260 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12261 }
12262
12263 }
12264
12265 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12266 {
12267 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12268 return u64EffAddr;
12269 }
12270 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12271 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12272 return u64EffAddr & UINT32_MAX;
12273}
12274#endif /* IEM_WITH_SETJMP */
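/*
 * Illustrative sketch (not built): the 16-bit ModR/M base/index table used by
 * iemOpHlpCalcRmEffAddrJmp above, reduced to a standalone helper that works on
 * plain register values. The function name and parameters are hypothetical;
 * the real code reads the registers and displacement from the CPU context and
 * the opcode stream instead, and tracks the SS default segment as it goes.
 */
#if 0
static uint16_t iemExampleCalc16BitEffAddr(uint8_t bRm, uint16_t uDisp,
                                           uint16_t bx, uint16_t bp,
                                           uint16_t si, uint16_t di)
{
    /* mod=00, r/m=110 is the register-less disp16 form. */
    if ((bRm & 0xc7) == 0x06)
        return uDisp;
    uint16_t uEffAddr = uDisp;
    switch (bRm & 0x07)
    {
        case 0: uEffAddr += bx + si; break;
        case 1: uEffAddr += bx + di; break;
        case 2: uEffAddr += bp + si; break;  /* defaults to SS */
        case 3: uEffAddr += bp + di; break;  /* defaults to SS */
        case 4: uEffAddr += si;      break;
        case 5: uEffAddr += di;      break;
        case 6: uEffAddr += bp;      break;  /* defaults to SS */
        case 7: uEffAddr += bx;      break;
    }
    return uEffAddr; /* 16-bit wrap-around is intentional. */
}
#endif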
12275
12276
12277/** @} */
12278
12279
12280
12281/*
12282 * Include the instructions
12283 */
12284#include "IEMAllInstructions.cpp.h"
12285
12286
12287
12288
12289#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12290
12291/**
12292 * Sets up execution verification mode.
12293 */
12294IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12295{
12297 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12298
12299 /*
12300 * Always note down the address of the current instruction.
12301 */
12302 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12303 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12304
12305 /*
12306 * Enable verification and/or logging.
12307 */
12308 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12309 if ( fNewNoRem
12310 && ( 0
12311#if 0 /* auto enable on first paged protected mode interrupt */
12312 || ( pOrgCtx->eflags.Bits.u1IF
12313 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12314 && TRPMHasTrap(pVCpu)
12315 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12316#endif
12317#if 0
12318 || ( pOrgCtx->cs.Sel == 0x10
12319 && ( pOrgCtx->rip == 0x90119e3e
12320 || pOrgCtx->rip == 0x901d9810))
12321#endif
12322#if 0 /* Auto enable DSL - FPU stuff. */
12323 || ( pOrgCtx->cs.Sel == 0x10
12324 && (// pOrgCtx->rip == 0xc02ec07f
12325 //|| pOrgCtx->rip == 0xc02ec082
12326 //|| pOrgCtx->rip == 0xc02ec0c9
12327 0
12328 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12329#endif
12330#if 0 /* Auto enable DSL - fstp st0 stuff. */
12331 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12332#endif
12333#if 0
12334 || pOrgCtx->rip == 0x9022bb3a
12335#endif
12336#if 0
12337 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12338#endif
12339#if 0
12340 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12341 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12342#endif
12343#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12344 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12345 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12346 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12347#endif
12348#if 0 /* NT4SP1 - xadd early boot. */
12349 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12350#endif
12351#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12352 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12353#endif
12354#if 0 /* NT4SP1 - cmpxchg (AMD). */
12355 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12356#endif
12357#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12358 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12359#endif
12360#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12361 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12362
12363#endif
12364#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12365 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12366
12367#endif
12368#if 0 /* NT4SP1 - frstor [ecx] */
12369 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12370#endif
12371#if 0 /* xxxxxx - All long mode code. */
12372 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12373#endif
12374#if 0 /* rep movsq linux 3.7 64-bit boot. */
12375 || (pOrgCtx->rip == 0x0000000000100241)
12376#endif
12377#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12378 || (pOrgCtx->rip == 0x000000000215e240)
12379#endif
12380#if 0 /* DOS's size-overridden iret to v8086. */
12381 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12382#endif
12383 )
12384 )
12385 {
12386 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12387 RTLogFlags(NULL, "enabled");
12388 fNewNoRem = false;
12389 }
12390 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12391 {
12392 pVCpu->iem.s.fNoRem = fNewNoRem;
12393 if (!fNewNoRem)
12394 {
12395 LogAlways(("Enabling verification mode!\n"));
12396 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12397 }
12398 else
12399 LogAlways(("Disabling verification mode!\n"));
12400 }
12401
12402 /*
12403 * Switch state.
12404 */
12405 if (IEM_VERIFICATION_ENABLED(pVCpu))
12406 {
12407 static CPUMCTX s_DebugCtx; /* Ugly! */
12408
12409 s_DebugCtx = *pOrgCtx;
12410 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12411 }
12412
12413 /*
12414 * See if there is an interrupt pending in TRPM and inject it if we can.
12415 */
12416 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12417 if ( pOrgCtx->eflags.Bits.u1IF
12418 && TRPMHasTrap(pVCpu)
12419 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12420 {
12421 uint8_t u8TrapNo;
12422 TRPMEVENT enmType;
12423 RTGCUINT uErrCode;
12424 RTGCPTR uCr2;
12425 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12426 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12427 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12428 TRPMResetTrap(pVCpu);
12429 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12430 }
12431
12432 /*
12433 * Reset the counters.
12434 */
12435 pVCpu->iem.s.cIOReads = 0;
12436 pVCpu->iem.s.cIOWrites = 0;
12437 pVCpu->iem.s.fIgnoreRaxRdx = false;
12438 pVCpu->iem.s.fOverlappingMovs = false;
12439 pVCpu->iem.s.fProblematicMemory = false;
12440 pVCpu->iem.s.fUndefinedEFlags = 0;
12441
12442 if (IEM_VERIFICATION_ENABLED(pVCpu))
12443 {
12444 /*
12445 * Free all verification records.
12446 */
12447 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12448 pVCpu->iem.s.pIemEvtRecHead = NULL;
12449 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12450 do
12451 {
12452 while (pEvtRec)
12453 {
12454 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12455 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12456 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12457 pEvtRec = pNext;
12458 }
12459 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12460 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12461 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12462 } while (pEvtRec);
12463 }
12464}
12465
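/*
 * Illustrative sketch (not built): the core pattern behind the verification
 * mode above - run the interpreter on a scratch copy of the state, run the
 * reference engine on the original, then diff the two.  The types and function
 * pointers here are hypothetical stand-ins, not the real IEM/REM/HM APIs; the
 * real diff is done field by field with the CHECK_FIELD macros further down.
 */
#if 0
typedef struct EXAMPLESTATE { uint64_t rip; uint64_t rax; /* ... */ } EXAMPLESTATE;

static unsigned exampleVerifyOneStep(EXAMPLESTATE *pOrg,
                                     void (*pfnIem)(EXAMPLESTATE *),
                                     void (*pfnRef)(EXAMPLESTATE *))
{
    EXAMPLESTATE Copy = *pOrg;      /* The interpreter works on the scratch copy... */
    pfnIem(&Copy);
    pfnRef(pOrg);                   /* ...the reference engine on the original. */

    unsigned cDiffs = 0;            /* Field-by-field comparison afterwards. */
    if (Copy.rip != pOrg->rip)
        cDiffs++;
    if (Copy.rax != pOrg->rax)
        cDiffs++;
    return cDiffs;
}
#endif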
12466
12467/**
12468 * Allocate an event record.
12469 * @returns Pointer to a record, or NULL if verification is disabled or allocation failed.
12470 */
12471IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12472{
12473 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12474 return NULL;
12475
12476 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12477 if (pEvtRec)
12478 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12479 else
12480 {
12481 if (!pVCpu->iem.s.ppIemEvtRecNext)
12482 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12483
12484 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12485 if (!pEvtRec)
12486 return NULL;
12487 }
12488 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12489 pEvtRec->pNext = NULL;
12490 return pEvtRec;
12491}
12492
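/*
 * Illustrative sketch (not built): the allocation above is a simple free-list
 * recycler - pop a previously freed node when one is available, otherwise fall
 * back to the heap.  The type and function names below are hypothetical and
 * malloc (assuming <stdlib.h>) stands in for MMR3HeapAlloc.
 */
#if 0
typedef struct EXAMPLENODE { struct EXAMPLENODE *pNext; } EXAMPLENODE;

static EXAMPLENODE *exampleAllocFromFreeList(EXAMPLENODE **ppFreeHead)
{
    EXAMPLENODE *pNode = *ppFreeHead;
    if (pNode)
        *ppFreeHead = pNode->pNext;                     /* Reuse a recycled record. */
    else
        pNode = (EXAMPLENODE *)malloc(sizeof(*pNode));  /* Fall back to the heap. */
    if (pNode)
        pNode->pNext = NULL;
    return pNode;
}
#endif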
12493
12494/**
12495 * IOMMMIORead notification.
12496 */
12497VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12498{
12499 PVMCPU pVCpu = VMMGetCpu(pVM);
12500 if (!pVCpu)
12501 return;
12502 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12503 if (!pEvtRec)
12504 return;
12505 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12506 pEvtRec->u.RamRead.GCPhys = GCPhys;
12507 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12508 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12509 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12510}
12511
12512
12513/**
12514 * IOMMMIOWrite notification.
12515 */
12516VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12517{
12518 PVMCPU pVCpu = VMMGetCpu(pVM);
12519 if (!pVCpu)
12520 return;
12521 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12522 if (!pEvtRec)
12523 return;
12524 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12525 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12526 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12527 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12528 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12529 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12530 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12531 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12532 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12533}
12534
12535
12536/**
12537 * IOMIOPortRead notification.
12538 */
12539VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12540{
12541 PVMCPU pVCpu = VMMGetCpu(pVM);
12542 if (!pVCpu)
12543 return;
12544 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12545 if (!pEvtRec)
12546 return;
12547 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12548 pEvtRec->u.IOPortRead.Port = Port;
12549 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12550 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12551 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12552}
12553
12554/**
12555 * IOMIOPortWrite notification.
12556 */
12557VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12558{
12559 PVMCPU pVCpu = VMMGetCpu(pVM);
12560 if (!pVCpu)
12561 return;
12562 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12563 if (!pEvtRec)
12564 return;
12565 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12566 pEvtRec->u.IOPortWrite.Port = Port;
12567 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12568 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12569 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12570 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12571}
12572
12573
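/**
 * String I/O port read notification.
 */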
12574VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12575{
12576 PVMCPU pVCpu = VMMGetCpu(pVM);
12577 if (!pVCpu)
12578 return;
12579 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12580 if (!pEvtRec)
12581 return;
12582 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12583 pEvtRec->u.IOPortStrRead.Port = Port;
12584 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12585 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12586 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12587 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12588}
12589
12590
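/**
 * String I/O port write notification.
 */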
12591VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12592{
12593 PVMCPU pVCpu = VMMGetCpu(pVM);
12594 if (!pVCpu)
12595 return;
12596 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12597 if (!pEvtRec)
12598 return;
12599 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12600 pEvtRec->u.IOPortStrWrite.Port = Port;
12601 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12602 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12603 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12604 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12605}
12606
12607
12608/**
12609 * Fakes and records an I/O port read.
12610 *
12611 * @returns VINF_SUCCESS.
12612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12613 * @param Port The I/O port.
12614 * @param pu32Value Where to store the fake value.
12615 * @param cbValue The size of the access.
12616 */
12617IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12618{
12619 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12620 if (pEvtRec)
12621 {
12622 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12623 pEvtRec->u.IOPortRead.Port = Port;
12624 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12625 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12626 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12627 }
12628 pVCpu->iem.s.cIOReads++;
12629 *pu32Value = 0xcccccccc;
12630 return VINF_SUCCESS;
12631}
12632
12633
12634/**
12635 * Fakes and records an I/O port write.
12636 *
12637 * @returns VINF_SUCCESS.
12638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12639 * @param Port The I/O port.
12640 * @param u32Value The value being written.
12641 * @param cbValue The size of the access.
12642 */
12643IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12644{
12645 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12646 if (pEvtRec)
12647 {
12648 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12649 pEvtRec->u.IOPortWrite.Port = Port;
12650 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12651 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12652 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12653 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12654 }
12655 pVCpu->iem.s.cIOWrites++;
12656 return VINF_SUCCESS;
12657}
12658
12659
12660/**
12661 * Used to add extra details (register state and disassembly) to an assertion message.
12662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12663 */
12664IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12665{
12666 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12667 PVM pVM = pVCpu->CTX_SUFF(pVM);
12669 char szRegs[4096];
12670 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12671 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12672 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12673 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12674 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12675 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12676 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12677 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12678 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12679 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12680 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12681 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12682 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12683 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12684 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12685 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12686 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12687 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12688 " efer=%016VR{efer}\n"
12689 " pat=%016VR{pat}\n"
12690 " sf_mask=%016VR{sf_mask}\n"
12691 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12692 " lstar=%016VR{lstar}\n"
12693 " star=%016VR{star} cstar=%016VR{cstar}\n"
12694 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12695 );
12696
12697 char szInstr1[256];
12698 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12699 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12700 szInstr1, sizeof(szInstr1), NULL);
12701 char szInstr2[256];
12702 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12703 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12704 szInstr2, sizeof(szInstr2), NULL);
12705
12706 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12707}
12708
12709
12710/**
12711 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12712 * dump to the assertion info.
12713 *
12714 * @param pEvtRec The record to dump.
12715 */
12716IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12717{
12718 switch (pEvtRec->enmEvent)
12719 {
12720 case IEMVERIFYEVENT_IOPORT_READ:
12721 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12722 pEvtRec->u.IOPortRead.Port,
12723 pEvtRec->u.IOPortRead.cbValue);
12724 break;
12725 case IEMVERIFYEVENT_IOPORT_WRITE:
12726 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12727 pEvtRec->u.IOPortWrite.Port,
12728 pEvtRec->u.IOPortWrite.cbValue,
12729 pEvtRec->u.IOPortWrite.u32Value);
12730 break;
12731 case IEMVERIFYEVENT_IOPORT_STR_READ:
12732 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12733 pEvtRec->u.IOPortStrRead.Port,
12734 pEvtRec->u.IOPortStrRead.cbValue,
12735 pEvtRec->u.IOPortStrRead.cTransfers);
12736 break;
12737 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12738 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12739 pEvtRec->u.IOPortStrWrite.Port,
12740 pEvtRec->u.IOPortStrWrite.cbValue,
12741 pEvtRec->u.IOPortStrWrite.cTransfers);
12742 break;
12743 case IEMVERIFYEVENT_RAM_READ:
12744 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12745 pEvtRec->u.RamRead.GCPhys,
12746 pEvtRec->u.RamRead.cb);
12747 break;
12748 case IEMVERIFYEVENT_RAM_WRITE:
12749 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12750 pEvtRec->u.RamWrite.GCPhys,
12751 pEvtRec->u.RamWrite.cb,
12752 (int)pEvtRec->u.RamWrite.cb,
12753 pEvtRec->u.RamWrite.ab);
12754 break;
12755 default:
12756 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12757 break;
12758 }
12759}
12760
12761
12762/**
12763 * Raises an assertion on the specified records, showing the given message with
12764 * a record dump attached.
12765 *
12766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12767 * @param pEvtRec1 The first record.
12768 * @param pEvtRec2 The second record.
12769 * @param pszMsg The message explaining why we're asserting.
12770 */
12771IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
12772{
12773 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12774 iemVerifyAssertAddRecordDump(pEvtRec1);
12775 iemVerifyAssertAddRecordDump(pEvtRec2);
12776 iemVerifyAssertMsg2(pVCpu);
12777 RTAssertPanic();
12778}
12779
12780
12781/**
12782 * Raises an assertion on the specified record, showing the given message with
12783 * a record dump attached.
12784 *
12785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12786 * @param pEvtRec The record to dump.
12787 * @param pszMsg The message explaining why we're asserting.
12788 */
12789IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
12790{
12791 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12792 iemVerifyAssertAddRecordDump(pEvtRec);
12793 iemVerifyAssertMsg2(pVCpu);
12794 RTAssertPanic();
12795}
12796
12797
12798/**
12799 * Verifies a write record.
12800 *
12801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12802 * @param pEvtRec The write record.
12803 * @param fRem Set if REM did the other execution. If clear,
12804 * it was HM.
12805 */
12806IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
12807{
12808 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
12809 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
12810 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
12811 if ( RT_FAILURE(rc)
12812 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
12813 {
12814 /* fend off ins */
12815 if ( !pVCpu->iem.s.cIOReads
12816 || pEvtRec->u.RamWrite.ab[0] != 0xcc
12817 || ( pEvtRec->u.RamWrite.cb != 1
12818 && pEvtRec->u.RamWrite.cb != 2
12819 && pEvtRec->u.RamWrite.cb != 4) )
12820 {
12821 /* fend off ROMs and MMIO */
12822 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
12823 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
12824 {
12825 /* fend off fxsave */
12826 if (pEvtRec->u.RamWrite.cb != 512)
12827 {
12828 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
12829 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12830 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
12831 RTAssertMsg2Add("%s: %.*Rhxs\n"
12832 "iem: %.*Rhxs\n",
12833 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
12834 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
12835 iemVerifyAssertAddRecordDump(pEvtRec);
12836 iemVerifyAssertMsg2(pVCpu);
12837 RTAssertPanic();
12838 }
12839 }
12840 }
12841 }
12842
12843}
12844
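/*
 * Illustrative sketch (not built): the essence of iemVerifyWriteRecord above -
 * read the guest-physical bytes back and compare them with what the record
 * says was written.  The reader callback and names are hypothetical; the real
 * code uses PGMPhysSimpleReadGCPhys and applies several "fend off" filters
 * (INS, ROM/MMIO ranges, FXSAVE) before asserting.  Assumes <string.h>.
 */
#if 0
static bool exampleWriteMatchesMemory(uint64_t GCPhys, const uint8_t *pabWritten, size_t cb,
                                      int (*pfnReadPhys)(void *pvDst, uint64_t GCPhys, size_t cb))
{
    uint8_t abActual[64];
    if (cb > sizeof(abActual))
        return false;                               /* Record too large for this sketch. */
    if (pfnReadPhys(abActual, GCPhys, cb) != 0)
        return false;                               /* Read-back failed. */
    return memcmp(abActual, pabWritten, cb) == 0;   /* The bytes must match exactly. */
}
#endif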
12845/**
12846 * Performs the post-execution verification checks.
12847 */
12848IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
12849{
12850 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12851 return rcStrictIem;
12852
12853 /*
12854 * Switch back the state.
12855 */
12856 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
12857 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
12858 Assert(pOrgCtx != pDebugCtx);
12859 IEM_GET_CTX(pVCpu) = pOrgCtx;
12860
12861 /*
12862 * Execute the instruction in REM.
12863 */
12864 bool fRem = false;
12865 PVM pVM = pVCpu->CTX_SUFF(pVM);
12867 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
12868#ifdef IEM_VERIFICATION_MODE_FULL_HM
12869 if ( HMIsEnabled(pVM)
12870 && pVCpu->iem.s.cIOReads == 0
12871 && pVCpu->iem.s.cIOWrites == 0
12872 && !pVCpu->iem.s.fProblematicMemory)
12873 {
12874 uint64_t uStartRip = pOrgCtx->rip;
12875 unsigned iLoops = 0;
12876 do
12877 {
12878 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
12879 iLoops++;
12880 } while ( rc == VINF_SUCCESS
12881 || ( rc == VINF_EM_DBG_STEPPED
12882 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
12883 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
12884 || ( pOrgCtx->rip != pDebugCtx->rip
12885 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
12886 && iLoops < 8) );
12887 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
12888 rc = VINF_SUCCESS;
12889 }
12890#endif
12891 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
12892 || rc == VINF_IOM_R3_IOPORT_READ
12893 || rc == VINF_IOM_R3_IOPORT_WRITE
12894 || rc == VINF_IOM_R3_MMIO_READ
12895 || rc == VINF_IOM_R3_MMIO_READ_WRITE
12896 || rc == VINF_IOM_R3_MMIO_WRITE
12897 || rc == VINF_CPUM_R3_MSR_READ
12898 || rc == VINF_CPUM_R3_MSR_WRITE
12899 || rc == VINF_EM_RESCHEDULE
12900 )
12901 {
12902 EMRemLock(pVM);
12903 rc = REMR3EmulateInstruction(pVM, pVCpu);
12904 AssertRC(rc);
12905 EMRemUnlock(pVM);
12906 fRem = true;
12907 }
12908
12909# if 1 /* Skip unimplemented instructions for now. */
12910 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
12911 {
12912 IEM_GET_CTX(pVCpu) = pOrgCtx;
12913 if (rc == VINF_EM_DBG_STEPPED)
12914 return VINF_SUCCESS;
12915 return rc;
12916 }
12917# endif
12918
12919 /*
12920 * Compare the register states.
12921 */
12922 unsigned cDiffs = 0;
12923 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
12924 {
12925 //Log(("REM and IEM ends up with different registers!\n"));
12926 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
12927
12928# define CHECK_FIELD(a_Field) \
12929 do \
12930 { \
12931 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
12932 { \
12933 switch (sizeof(pOrgCtx->a_Field)) \
12934 { \
12935 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
12936 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
12937 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
12938 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
12939 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
12940 } \
12941 cDiffs++; \
12942 } \
12943 } while (0)
12944# define CHECK_XSTATE_FIELD(a_Field) \
12945 do \
12946 { \
12947 if (pOrgXState->a_Field != pDebugXState->a_Field) \
12948 { \
12949 switch (sizeof(pOrgXState->a_Field)) \
12950 { \
12951 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
12952 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
12953 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
12954 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
12955 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
12956 } \
12957 cDiffs++; \
12958 } \
12959 } while (0)
12960
12961# define CHECK_BIT_FIELD(a_Field) \
12962 do \
12963 { \
12964 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
12965 { \
12966 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
12967 cDiffs++; \
12968 } \
12969 } while (0)
12970
12971# define CHECK_SEL(a_Sel) \
12972 do \
12973 { \
12974 CHECK_FIELD(a_Sel.Sel); \
12975 CHECK_FIELD(a_Sel.Attr.u); \
12976 CHECK_FIELD(a_Sel.u64Base); \
12977 CHECK_FIELD(a_Sel.u32Limit); \
12978 CHECK_FIELD(a_Sel.fFlags); \
12979 } while (0)
12980
12981 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
12982 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
12983
12984#if 1 /* The recompiler doesn't update these the intel way. */
12985 if (fRem)
12986 {
12987 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
12988 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
12989 pOrgXState->x87.CS = pDebugXState->x87.CS;
12990 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
12991 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
12992 pOrgXState->x87.DS = pDebugXState->x87.DS;
12993 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
12994 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
12995 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
12996 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
12997 }
12998#endif
12999 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13000 {
13001 RTAssertMsg2Weak(" the FPU state differs\n");
13002 cDiffs++;
13003 CHECK_XSTATE_FIELD(x87.FCW);
13004 CHECK_XSTATE_FIELD(x87.FSW);
13005 CHECK_XSTATE_FIELD(x87.FTW);
13006 CHECK_XSTATE_FIELD(x87.FOP);
13007 CHECK_XSTATE_FIELD(x87.FPUIP);
13008 CHECK_XSTATE_FIELD(x87.CS);
13009 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13010 CHECK_XSTATE_FIELD(x87.FPUDP);
13011 CHECK_XSTATE_FIELD(x87.DS);
13012 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13013 CHECK_XSTATE_FIELD(x87.MXCSR);
13014 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13015 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13016 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13017 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13018 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13019 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13020 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13021 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13022 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13023 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13024 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13025 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13026 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13027 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13028 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13029 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13030 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13031 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13032 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13033 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13034 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13035 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13036 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13037 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13038 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13039 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13040 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13041 }
13042 CHECK_FIELD(rip);
13043 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13044 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13045 {
13046 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13047 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13048 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13049 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13050 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13051 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13052 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13053 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13054 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13055 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13056 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13057 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13058 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13059 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13060 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13061 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13062 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13063 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13064 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13065 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13066 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13067 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13068 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13069 }
13070
13071 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13072 CHECK_FIELD(rax);
13073 CHECK_FIELD(rcx);
13074 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13075 CHECK_FIELD(rdx);
13076 CHECK_FIELD(rbx);
13077 CHECK_FIELD(rsp);
13078 CHECK_FIELD(rbp);
13079 CHECK_FIELD(rsi);
13080 CHECK_FIELD(rdi);
13081 CHECK_FIELD(r8);
13082 CHECK_FIELD(r9);
13083 CHECK_FIELD(r10);
13084 CHECK_FIELD(r11);
13085 CHECK_FIELD(r12);
13086 CHECK_FIELD(r13);
13087 CHECK_SEL(cs);
13088 CHECK_SEL(ss);
13089 CHECK_SEL(ds);
13090 CHECK_SEL(es);
13091 CHECK_SEL(fs);
13092 CHECK_SEL(gs);
13093 CHECK_FIELD(cr0);
13094
13095 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13096 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13097 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
13098 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13099 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13100 {
13101 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13102 { /* ignore */ }
13103 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13104 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13105 && fRem)
13106 { /* ignore */ }
13107 else
13108 CHECK_FIELD(cr2);
13109 }
13110 CHECK_FIELD(cr3);
13111 CHECK_FIELD(cr4);
13112 CHECK_FIELD(dr[0]);
13113 CHECK_FIELD(dr[1]);
13114 CHECK_FIELD(dr[2]);
13115 CHECK_FIELD(dr[3]);
13116 CHECK_FIELD(dr[6]);
13117 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13118 CHECK_FIELD(dr[7]);
13119 CHECK_FIELD(gdtr.cbGdt);
13120 CHECK_FIELD(gdtr.pGdt);
13121 CHECK_FIELD(idtr.cbIdt);
13122 CHECK_FIELD(idtr.pIdt);
13123 CHECK_SEL(ldtr);
13124 CHECK_SEL(tr);
13125 CHECK_FIELD(SysEnter.cs);
13126 CHECK_FIELD(SysEnter.eip);
13127 CHECK_FIELD(SysEnter.esp);
13128 CHECK_FIELD(msrEFER);
13129 CHECK_FIELD(msrSTAR);
13130 CHECK_FIELD(msrPAT);
13131 CHECK_FIELD(msrLSTAR);
13132 CHECK_FIELD(msrCSTAR);
13133 CHECK_FIELD(msrSFMASK);
13134 CHECK_FIELD(msrKERNELGSBASE);
13135
13136 if (cDiffs != 0)
13137 {
13138 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13139 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13140 RTAssertPanic();
13141 static bool volatile s_fEnterDebugger = true;
13142 if (s_fEnterDebugger)
13143 DBGFSTOP(pVM);
13144
13145# if 1 /* Ignore unimplemented instructions for now. */
13146 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13147 rcStrictIem = VINF_SUCCESS;
13148# endif
13149 }
13150# undef CHECK_FIELD
13151# undef CHECK_BIT_FIELD
13152 }
13153
13154 /*
13155 * If the register state compared fine, check the verification event
13156 * records.
13157 */
13158 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13159 {
13160 /*
13161 * Compare verification event records.
13162 * - I/O port accesses should be a 1:1 match.
13163 */
13164 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13165 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13166 while (pIemRec && pOtherRec)
13167 {
13168 /* Since we might miss RAM writes and reads, ignore reads and verify
13169 that any extra IEM write records match the actual guest memory. */
13170 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13171 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13172 && pIemRec->pNext)
13173 {
13174 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13175 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13176 pIemRec = pIemRec->pNext;
13177 }
13178
13179 /* Do the compare. */
13180 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13181 {
13182 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13183 break;
13184 }
13185 bool fEquals;
13186 switch (pIemRec->enmEvent)
13187 {
13188 case IEMVERIFYEVENT_IOPORT_READ:
13189 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13190 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13191 break;
13192 case IEMVERIFYEVENT_IOPORT_WRITE:
13193 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13194 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13195 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13196 break;
13197 case IEMVERIFYEVENT_IOPORT_STR_READ:
13198 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13199 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13200 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13201 break;
13202 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13203 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13204 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13205 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13206 break;
13207 case IEMVERIFYEVENT_RAM_READ:
13208 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13209 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13210 break;
13211 case IEMVERIFYEVENT_RAM_WRITE:
13212 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13213 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13214 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13215 break;
13216 default:
13217 fEquals = false;
13218 break;
13219 }
13220 if (!fEquals)
13221 {
13222 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13223 break;
13224 }
13225
13226 /* advance */
13227 pIemRec = pIemRec->pNext;
13228 pOtherRec = pOtherRec->pNext;
13229 }
13230
13231 /* Ignore extra writes and reads. */
13232 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13233 {
13234 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13235 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13236 pIemRec = pIemRec->pNext;
13237 }
13238 if (pIemRec != NULL)
13239 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13240 else if (pOtherRec != NULL)
13241 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13242 }
13243 IEM_GET_CTX(pVCpu) = pOrgCtx;
13244
13245 return rcStrictIem;
13246}
13247
13248#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13249
13250/* stubs */
13251IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13252{
13253 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13254 return VERR_INTERNAL_ERROR;
13255}
13256
13257IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13258{
13259 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13260 return VERR_INTERNAL_ERROR;
13261}
13262
13263#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13264
13265
13266#ifdef LOG_ENABLED
13267/**
13268 * Logs the current instruction.
13269 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13270 * @param pCtx The current CPU context.
13271 * @param fSameCtx Set if we have the same context information as the VMM,
13272 * clear if we may have already executed an instruction in
13273 * our debug context. When clear, we assume IEMCPU holds
13274 * valid CPU mode info.
13275 */
13276IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13277{
13278# ifdef IN_RING3
13279 if (LogIs2Enabled())
13280 {
13281 char szInstr[256];
13282 uint32_t cbInstr = 0;
13283 if (fSameCtx)
13284 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13285 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13286 szInstr, sizeof(szInstr), &cbInstr);
13287 else
13288 {
13289 uint32_t fFlags = 0;
13290 switch (pVCpu->iem.s.enmCpuMode)
13291 {
13292 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13293 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13294 case IEMMODE_16BIT:
13295 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13296 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13297 else
13298 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13299 break;
13300 }
13301 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13302 szInstr, sizeof(szInstr), &cbInstr);
13303 }
13304
13305 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13306 Log2(("****\n"
13307 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13308 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13309 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13310 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13311 " %s\n"
13312 ,
13313 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13314 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13315 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13316 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13317 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13318 szInstr));
13319
13320 if (LogIs3Enabled())
13321 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13322 }
13323 else
13324# endif
13325 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13326 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13327}
13328#endif
13329
13330
13331/**
13332 * Makes status code adjustments (pass up from I/O and access handler)
13333 * as well as maintaining statistics.
13334 *
13335 * @returns Strict VBox status code to pass up.
13336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13337 * @param rcStrict The status from executing an instruction.
13338 */
13339DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13340{
13341 if (rcStrict != VINF_SUCCESS)
13342 {
13343 if (RT_SUCCESS(rcStrict))
13344 {
13345 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13346 || rcStrict == VINF_IOM_R3_IOPORT_READ
13347 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13348 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13349 || rcStrict == VINF_IOM_R3_MMIO_READ
13350 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13351 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13352 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13353 || rcStrict == VINF_CPUM_R3_MSR_READ
13354 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13355 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13356 || rcStrict == VINF_EM_RAW_TO_R3
13357 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13358 /* raw-mode / virt handlers only: */
13359 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13360 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13361 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13362 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13363 || rcStrict == VINF_SELM_SYNC_GDT
13364 || rcStrict == VINF_CSAM_PENDING_ACTION
13365 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13366 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13367/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13368 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13369 if (rcPassUp == VINF_SUCCESS)
13370 pVCpu->iem.s.cRetInfStatuses++;
13371 else if ( rcPassUp < VINF_EM_FIRST
13372 || rcPassUp > VINF_EM_LAST
13373 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13374 {
13375 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13376 pVCpu->iem.s.cRetPassUpStatus++;
13377 rcStrict = rcPassUp;
13378 }
13379 else
13380 {
13381 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13382 pVCpu->iem.s.cRetInfStatuses++;
13383 }
13384 }
13385 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13386 pVCpu->iem.s.cRetAspectNotImplemented++;
13387 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13388 pVCpu->iem.s.cRetInstrNotImplemented++;
13389#ifdef IEM_VERIFICATION_MODE_FULL
13390 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13391 rcStrict = VINF_SUCCESS;
13392#endif
13393 else
13394 pVCpu->iem.s.cRetErrStatuses++;
13395 }
13396 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13397 {
13398 pVCpu->iem.s.cRetPassUpStatus++;
13399 rcStrict = pVCpu->iem.s.rcPassUp;
13400 }
13401
13402 return rcStrict;
13403}
13404
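/*
 * Illustrative sketch (not built): the status merging rule used above.  In the
 * VBox convention the informational EM statuses in the VINF_EM_FIRST..VINF_EM_LAST
 * range are ordered so that a numerically lower value has higher priority, and a
 * pending pass-up status outside that range always wins.  The parameters here are
 * plain ints standing in for the real strict status codes.
 */
#if 0
static int exampleMergeStatus(int rcInstr, int rcPassUp, int rcEmFirst, int rcEmLast)
{
    if (rcPassUp == 0 /* VINF_SUCCESS */)
        return rcInstr;                 /* Nothing pending, keep the instruction status. */
    if (rcInstr == 0 /* VINF_SUCCESS */)
        return rcPassUp;                /* Only the pass-up status is interesting. */
    if (   rcPassUp < rcEmFirst
        || rcPassUp > rcEmLast
        || rcPassUp < rcInstr)
        return rcPassUp;                /* The pass-up status takes precedence. */
    return rcInstr;                     /* The instruction status is more important. */
}
#endif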
13405
13406/**
13407 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13408 * IEMExecOneWithPrefetchedByPC.
13409 *
13410 * Similar code is found in IEMExecLots.
13411 *
13412 * @return Strict VBox status code.
13413 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13415 * @param fExecuteInhibit If set, execute the instruction following CLI,
13416 * POP SS and MOV SS,GR.
13417 */
13418#ifdef __GNUC__
13419DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13420#else
13421DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13422#endif
13423{
13424#ifdef IEM_WITH_SETJMP
13425 VBOXSTRICTRC rcStrict;
13426 jmp_buf JmpBuf;
13427 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13428 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13429 if ((rcStrict = setjmp(JmpBuf)) == 0)
13430 {
13431 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13432 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13433 }
13434 else
13435 pVCpu->iem.s.cLongJumps++;
13436 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13437#else
13438 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13439 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13440#endif
13441 if (rcStrict == VINF_SUCCESS)
13442 pVCpu->iem.s.cInstructions++;
13443 if (pVCpu->iem.s.cActiveMappings > 0)
13444 {
13445 Assert(rcStrict != VINF_SUCCESS);
13446 iemMemRollback(pVCpu);
13447 }
13448//#ifdef DEBUG
13449// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13450//#endif
13451
13452 /* Execute the next instruction as well if a cli, pop ss or
13453 mov ss, Gr has just completed successfully. */
13454 if ( fExecuteInhibit
13455 && rcStrict == VINF_SUCCESS
13456 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13457 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13458 {
13459 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13460 if (rcStrict == VINF_SUCCESS)
13461 {
13462#ifdef LOG_ENABLED
13463 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13464#endif
13465#ifdef IEM_WITH_SETJMP
13466 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13467 if ((rcStrict = setjmp(JmpBuf)) == 0)
13468 {
13469 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13470 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13471 }
13472 else
13473 pVCpu->iem.s.cLongJumps++;
13474 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13475#else
13476 IEM_OPCODE_GET_NEXT_U8(&b);
13477 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13478#endif
13479 if (rcStrict == VINF_SUCCESS)
13480 pVCpu->iem.s.cInstructions++;
13481 if (pVCpu->iem.s.cActiveMappings > 0)
13482 {
13483 Assert(rcStrict != VINF_SUCCESS);
13484 iemMemRollback(pVCpu);
13485 }
13486 }
13487 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13488 }
13489
13490 /*
13491 * Return value fiddling, statistics and sanity assertions.
13492 */
13493 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13494
13495 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13496 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13497#if defined(IEM_VERIFICATION_MODE_FULL)
13498 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13499 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13500 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13501 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13502#endif
13503 return rcStrict;
13504}
13505
13506
13507#ifdef IN_RC
13508/**
13509 * Re-enters raw-mode or ensures we return to ring-3.
13510 *
13511 * @returns rcStrict, maybe modified.
13512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13513 * @param pCtx The current CPU context.
13514 * @param rcStrict The status code returned by the interpreter.
13515 */
13516DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13517{
13518 if ( !pVCpu->iem.s.fInPatchCode
13519 && ( rcStrict == VINF_SUCCESS
13520 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13521 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13522 {
13523 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13524 CPUMRawEnter(pVCpu);
13525 else
13526 {
13527 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13528 rcStrict = VINF_EM_RESCHEDULE;
13529 }
13530 }
13531 return rcStrict;
13532}
13533#endif
13534
13535
13536/**
13537 * Execute one instruction.
13538 *
13539 * @return Strict VBox status code.
13540 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13541 */
13542VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13543{
13544#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13545 if (++pVCpu->iem.s.cVerifyDepth == 1)
13546 iemExecVerificationModeSetup(pVCpu);
13547#endif
13548#ifdef LOG_ENABLED
13549 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13550 iemLogCurInstr(pVCpu, pCtx, true);
13551#endif
13552
13553 /*
13554 * Do the decoding and emulation.
13555 */
13556 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13557 if (rcStrict == VINF_SUCCESS)
13558 rcStrict = iemExecOneInner(pVCpu, true);
13559
13560#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13561 /*
13562 * Assert some sanity.
13563 */
13564 if (pVCpu->iem.s.cVerifyDepth == 1)
13565 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13566 pVCpu->iem.s.cVerifyDepth--;
13567#endif
13568#ifdef IN_RC
13569 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13570#endif
13571 if (rcStrict != VINF_SUCCESS)
13572 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13573 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13574 return rcStrict;
13575}
13576
13577
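/**
 * Executes one instruction, like IEMExecOne, but also returns the number of
 * guest memory bytes the instruction wrote if the caller asks for it.
 *
 * @return Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param pCtxCore The context core; must match the IEM context (asserted).
 * @param pcbWritten Where to return the number of bytes written. Optional.
 */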
13578VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13579{
13580 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13581 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13582
13583 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13584 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13585 if (rcStrict == VINF_SUCCESS)
13586 {
13587 rcStrict = iemExecOneInner(pVCpu, true);
13588 if (pcbWritten)
13589 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13590 }
13591
13592#ifdef IN_RC
13593 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13594#endif
13595 return rcStrict;
13596}
13597
13598
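/**
 * Executes one instruction, reusing the supplied prefetched opcode bytes when
 * they start at the current RIP instead of fetching them again.
 *
 * @return Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param pCtxCore The context core; must match the IEM context (asserted).
 * @param OpcodeBytesPC The PC of the opcode bytes.
 * @param pvOpcodeBytes Prefetched opcode bytes.
 * @param cbOpcodeBytes Number of prefetched bytes.
 */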
13599VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13600 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13601{
13602 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13603 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13604
13605 VBOXSTRICTRC rcStrict;
13606 if ( cbOpcodeBytes
13607 && pCtx->rip == OpcodeBytesPC)
13608 {
13609 iemInitDecoder(pVCpu, false);
13610#ifdef IEM_WITH_CODE_TLB
13611 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13612 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13613 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13614 pVCpu->iem.s.offCurInstrStart = 0;
13615 pVCpu->iem.s.offInstrNextByte = 0;
13616#else
13617 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13618 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13619#endif
13620 rcStrict = VINF_SUCCESS;
13621 }
13622 else
13623 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13624 if (rcStrict == VINF_SUCCESS)
13625 {
13626 rcStrict = iemExecOneInner(pVCpu, true);
13627 }
13628
13629#ifdef IN_RC
13630 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13631#endif
13632 return rcStrict;
13633}
13634
13635
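/**
 * Executes one instruction with memory access handlers bypassed, optionally
 * returning the number of guest memory bytes written.
 *
 * @return Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param pCtxCore The context core; must match the IEM context (asserted).
 * @param pcbWritten Where to return the number of bytes written. Optional.
 */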
13636VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13637{
13638 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13639 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13640
13641 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13642 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13643 if (rcStrict == VINF_SUCCESS)
13644 {
13645 rcStrict = iemExecOneInner(pVCpu, false);
13646 if (pcbWritten)
13647 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13648 }
13649
13650#ifdef IN_RC
13651 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13652#endif
13653 return rcStrict;
13654}
13655
13656
13657VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13658 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13659{
13660 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13661 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13662
13663 VBOXSTRICTRC rcStrict;
13664 if ( cbOpcodeBytes
13665 && pCtx->rip == OpcodeBytesPC)
13666 {
13667 iemInitDecoder(pVCpu, true);
13668#ifdef IEM_WITH_CODE_TLB
13669 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13670 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13671 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13672 pVCpu->iem.s.offCurInstrStart = 0;
13673 pVCpu->iem.s.offInstrNextByte = 0;
13674#else
13675 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13676 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13677#endif
13678 rcStrict = VINF_SUCCESS;
13679 }
13680 else
13681 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13682 if (rcStrict == VINF_SUCCESS)
13683 rcStrict = iemExecOneInner(pVCpu, false);
13684
13685#ifdef IN_RC
13686 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13687#endif
13688 return rcStrict;
13689}
13690
13691
13692/**
13693 * May come in handy when debugging DISGetParamSize.
13694 *
13695 * @returns Strict VBox status code.
13696 * @param pVCpu The cross context virtual CPU structure of the
13697 * calling EMT.
13698 * @param pCtxCore The context core structure.
13699 * @param OpcodeBytesPC The PC of the opcode bytes.
13700 * @param pvOpcodeBytes Prefetched opcode bytes.
13701 * @param cbOpcodeBytes Number of prefetched bytes.
13702 * @param pcbWritten Where to return the number of bytes written.
13703 * Optional.
13704 */
13705VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13706 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13707 uint32_t *pcbWritten)
13708{
13709 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13710 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13711
13712 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13713 VBOXSTRICTRC rcStrict;
13714 if ( cbOpcodeBytes
13715 && pCtx->rip == OpcodeBytesPC)
13716 {
13717 iemInitDecoder(pVCpu, true);
13718#ifdef IEM_WITH_CODE_TLB
13719 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13720 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13721 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13722 pVCpu->iem.s.offCurInstrStart = 0;
13723 pVCpu->iem.s.offInstrNextByte = 0;
13724#else
13725 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13726 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13727#endif
13728 rcStrict = VINF_SUCCESS;
13729 }
13730 else
13731 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13732 if (rcStrict == VINF_SUCCESS)
13733 {
13734 rcStrict = iemExecOneInner(pVCpu, false);
13735 if (pcbWritten)
13736 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13737 }
13738
13739#ifdef IN_RC
13740 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13741#endif
13742 return rcStrict;
13743}
13744
13745
13746VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13747{
13748 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13749
13750#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13751 /*
13752 * See if there is an interrupt pending in TRPM, inject it if we can.
13753 */
13754 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13755# ifdef IEM_VERIFICATION_MODE_FULL
13756 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13757# endif
13758 if ( pCtx->eflags.Bits.u1IF
13759 && TRPMHasTrap(pVCpu)
13760 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13761 {
13762 uint8_t u8TrapNo;
13763 TRPMEVENT enmType;
13764 RTGCUINT uErrCode;
13765 RTGCPTR uCr2;
13766 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13767 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13768 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13769 TRPMResetTrap(pVCpu);
13770 }
13771
13772 /*
13773 * Log the state.
13774 */
13775# ifdef LOG_ENABLED
13776 iemLogCurInstr(pVCpu, pCtx, true);
13777# endif
13778
13779 /*
13780 * Do the decoding and emulation.
13781 */
13782 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13783 if (rcStrict == VINF_SUCCESS)
13784 rcStrict = iemExecOneInner(pVCpu, true);
13785
13786 /*
13787 * Assert some sanity.
13788 */
13789 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13790
13791 /*
13792 * Log and return.
13793 */
13794 if (rcStrict != VINF_SUCCESS)
13795 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13796 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13797 if (pcInstructions)
13798 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
13799 return rcStrict;
13800
13801#else /* Not verification mode */
13802
13803 /*
13804 * See if there is an interrupt pending in TRPM, inject it if we can.
13805 */
13806 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13807# ifdef IEM_VERIFICATION_MODE_FULL
13808 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13809# endif
13810 if ( pCtx->eflags.Bits.u1IF
13811 && TRPMHasTrap(pVCpu)
13812 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13813 {
13814 uint8_t u8TrapNo;
13815 TRPMEVENT enmType;
13816 RTGCUINT uErrCode;
13817 RTGCPTR uCr2;
13818 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13819 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13820 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13821 TRPMResetTrap(pVCpu);
13822 }
13823
13824 /*
13825 * Initial decoder init w/ prefetch, then setup setjmp.
13826 */
13827 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13828 if (rcStrict == VINF_SUCCESS)
13829 {
13830# ifdef IEM_WITH_SETJMP
13831 jmp_buf JmpBuf;
13832 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13833 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13834 pVCpu->iem.s.cActiveMappings = 0;
13835 if ((rcStrict = setjmp(JmpBuf)) == 0)
13836# endif
13837 {
13838 /*
13839 * The run loop. We limit ourselves to 4096 instructions right now.
13840 */
13841 PVM pVM = pVCpu->CTX_SUFF(pVM);
13842 uint32_t cInstr = 4096;
13843 for (;;)
13844 {
13845 /*
13846 * Log the state.
13847 */
13848# ifdef LOG_ENABLED
13849 iemLogCurInstr(pVCpu, pCtx, true);
13850# endif
13851
13852 /*
13853 * Do the decoding and emulation.
13854 */
13855 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13856 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13857 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13858 {
13859 Assert(pVCpu->iem.s.cActiveMappings == 0);
13860 pVCpu->iem.s.cInstructions++;
13861 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
13862 {
13863 uint32_t fCpu = pVCpu->fLocalForcedActions
13864 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
13865 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
13866 | VMCPU_FF_TLB_FLUSH
13867# ifdef VBOX_WITH_RAW_MODE
13868 | VMCPU_FF_TRPM_SYNC_IDT
13869 | VMCPU_FF_SELM_SYNC_TSS
13870 | VMCPU_FF_SELM_SYNC_GDT
13871 | VMCPU_FF_SELM_SYNC_LDT
13872# endif
13873 | VMCPU_FF_INHIBIT_INTERRUPTS
13874 | VMCPU_FF_BLOCK_NMIS ));
13875
13876 if (RT_LIKELY( ( !fCpu
13877 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
13878 && !pCtx->rflags.Bits.u1IF) )
13879 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
13880 {
13881 if (cInstr-- > 0)
13882 {
13883 Assert(pVCpu->iem.s.cActiveMappings == 0);
13884 iemReInitDecoder(pVCpu);
13885 continue;
13886 }
13887 }
13888 }
13889 Assert(pVCpu->iem.s.cActiveMappings == 0);
13890 }
13891 else if (pVCpu->iem.s.cActiveMappings > 0)
13892 iemMemRollback(pVCpu);
13893 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13894 break;
13895 }
13896 }
13897# ifdef IEM_WITH_SETJMP
13898 else
13899 {
13900 if (pVCpu->iem.s.cActiveMappings > 0)
13901 iemMemRollback(pVCpu);
13902 pVCpu->iem.s.cLongJumps++;
13903 }
13904 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13905# endif
13906
13907 /*
13908 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
13909 */
13910 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13911 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13912# if defined(IEM_VERIFICATION_MODE_FULL)
13913 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13915 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13917# endif
13918 }
13919
13920 /*
13921 * Maybe re-enter raw-mode and log.
13922 */
13923# ifdef IN_RC
13924 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13925# endif
13926 if (rcStrict != VINF_SUCCESS)
13927 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13928 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13929 if (pcInstructions)
13930 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
13931 return rcStrict;
13932#endif /* Not verification mode */
13933}
13934
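/* Editor's illustration (not part of the original source): a minimal sketch of
 * how an execution loop might drive IEMExecLots. The function returns after at
 * most 4096 instructions, or as soon as a relevant force flag is pending, so
 * the caller is expected to process force flags and reschedule in between. */
#if 0
static VBOXSTRICTRC exampleExecuteBlock(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("exampleExecuteBlock: %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif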
13935
13936
13937/**
13938 * Injects a trap, fault, abort, software interrupt or external interrupt.
13939 *
13940 * The parameter list matches TRPMQueryTrapAll pretty closely.
13941 *
13942 * @returns Strict VBox status code.
13943 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13944 * @param u8TrapNo The trap number.
13945 * @param enmType What type is it (trap/fault/abort), software
13946 * interrupt or hardware interrupt.
13947 * @param uErrCode The error code if applicable.
13948 * @param uCr2 The CR2 value if applicable.
13949 * @param cbInstr The instruction length (only relevant for
13950 * software interrupts).
13951 */
13952VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
13953 uint8_t cbInstr)
13954{
13955 iemInitDecoder(pVCpu, false);
13956#ifdef DBGFTRACE_ENABLED
13957 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
13958 u8TrapNo, enmType, uErrCode, uCr2);
13959#endif
13960
13961 uint32_t fFlags;
13962 switch (enmType)
13963 {
13964 case TRPM_HARDWARE_INT:
13965 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
13966 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
13967 uErrCode = uCr2 = 0;
13968 break;
13969
13970 case TRPM_SOFTWARE_INT:
13971 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
13972 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
13973 uErrCode = uCr2 = 0;
13974 break;
13975
13976 case TRPM_TRAP:
13977 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
13978 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
13979 if (u8TrapNo == X86_XCPT_PF)
13980 fFlags |= IEM_XCPT_FLAGS_CR2;
13981 switch (u8TrapNo)
13982 {
13983 case X86_XCPT_DF:
13984 case X86_XCPT_TS:
13985 case X86_XCPT_NP:
13986 case X86_XCPT_SS:
13987 case X86_XCPT_PF:
13988 case X86_XCPT_AC:
13989 fFlags |= IEM_XCPT_FLAGS_ERR;
13990 break;
13991
13992 case X86_XCPT_NMI:
13993 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
13994 break;
13995 }
13996 break;
13997
13998 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13999 }
14000
14001 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14002}
14003
14004
14005/**
14006 * Injects the active TRPM event.
14007 *
14008 * @returns Strict VBox status code.
14009 * @param pVCpu The cross context virtual CPU structure.
14010 */
14011VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14012{
14013#ifndef IEM_IMPLEMENTS_TASKSWITCH
14014 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14015#else
14016 uint8_t u8TrapNo;
14017 TRPMEVENT enmType;
14018 RTGCUINT uErrCode;
14019 RTGCUINTPTR uCr2;
14020 uint8_t cbInstr;
14021 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14022 if (RT_FAILURE(rc))
14023 return rc;
14024
14025 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14026
14027 /** @todo Are there any other codes that imply the event was successfully
14028 * delivered to the guest? See @bugref{6607}. */
14029 if ( rcStrict == VINF_SUCCESS
14030 || rcStrict == VINF_IEM_RAISED_XCPT)
14031 {
14032 TRPMResetTrap(pVCpu);
14033 }
14034 return rcStrict;
14035#endif
14036}
14037
14038
14039VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14040{
14041 return VERR_NOT_IMPLEMENTED;
14042}
14043
14044
14045VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14046{
14047 return VERR_NOT_IMPLEMENTED;
14048}
14049
14050
14051#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14052/**
14053 * Executes an IRET instruction with default operand size.
14054 *
14055 * This is for PATM.
14056 *
14057 * @returns VBox status code.
14058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14059 * @param pCtxCore The register frame.
14060 */
14061VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14062{
14063 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14064
14065 iemCtxCoreToCtx(pCtx, pCtxCore);
14066 iemInitDecoder(pVCpu);
14067 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14068 if (rcStrict == VINF_SUCCESS)
14069 iemCtxToCtxCore(pCtxCore, pCtx);
14070 else
14071 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14072 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14073 return rcStrict;
14074}
14075#endif
14076
14077
14078/**
14079 * Macro used by the IEMExec* method to check the given instruction length.
14080 *
14081 * Will return on failure!
14082 *
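 * The single unsigned comparison checks a_cbMin <= a_cbInstr <= 15 in one go:
 * subtracting a_cbMin makes any value below the minimum wrap around to a huge
 * unsigned number, so with a_cbMin=1 a length of 0 fails (0-1 > 14) while a
 * length of 15 passes (15-1 == 14).
 *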
14083 * @param a_cbInstr The given instruction length.
14084 * @param a_cbMin The minimum length.
14085 */
14086#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14087 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14088 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14089
14090
14091/**
14092 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14093 *
14094 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14095 *
14096 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14098 * @param rcStrict The status code to fiddle.
14099 */
14100DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14101{
14102 iemUninitExec(pVCpu);
14103#ifdef IN_RC
14104 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14105 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14106#else
14107 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14108#endif
14109}
14110
14111
14112/**
14113 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14114 *
14115 * This API ASSUMES that the caller has already verified that the guest code is
14116 * allowed to access the I/O port. (The I/O port is in the DX register in the
14117 * guest state.)
14118 *
14119 * @returns Strict VBox status code.
14120 * @param pVCpu The cross context virtual CPU structure.
14121 * @param cbValue The size of the I/O port access (1, 2, or 4).
14122 * @param enmAddrMode The addressing mode.
14123 * @param fRepPrefix Indicates whether a repeat prefix is used
14124 * (doesn't matter which for this instruction).
14125 * @param cbInstr The instruction length in bytes.
14126 * @param iEffSeg The effective segment register number.
14127 * @param fIoChecked Whether the access to the I/O port has been
14128 * checked or not. It's typically checked in the
14129 * HM scenario.
14130 */
14131VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14132 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14133{
14134 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14135 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14136
14137 /*
14138 * State init.
14139 */
14140 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14141
14142 /*
14143 * Switch orgy for getting to the right handler.
14144 */
14145 VBOXSTRICTRC rcStrict;
14146 if (fRepPrefix)
14147 {
14148 switch (enmAddrMode)
14149 {
14150 case IEMMODE_16BIT:
14151 switch (cbValue)
14152 {
14153 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14154 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14155 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14156 default:
14157 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14158 }
14159 break;
14160
14161 case IEMMODE_32BIT:
14162 switch (cbValue)
14163 {
14164 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14165 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14166 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14167 default:
14168 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14169 }
14170 break;
14171
14172 case IEMMODE_64BIT:
14173 switch (cbValue)
14174 {
14175 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14176 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14177 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14178 default:
14179 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14180 }
14181 break;
14182
14183 default:
14184 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14185 }
14186 }
14187 else
14188 {
14189 switch (enmAddrMode)
14190 {
14191 case IEMMODE_16BIT:
14192 switch (cbValue)
14193 {
14194 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14195 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14196 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14197 default:
14198 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14199 }
14200 break;
14201
14202 case IEMMODE_32BIT:
14203 switch (cbValue)
14204 {
14205 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14206 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14207 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14208 default:
14209 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14210 }
14211 break;
14212
14213 case IEMMODE_64BIT:
14214 switch (cbValue)
14215 {
14216 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14217 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14218 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14219 default:
14220 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14221 }
14222 break;
14223
14224 default:
14225 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14226 }
14227 }
14228
14229 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14230}
14231
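/* Editor's illustration (not part of the original source): a minimal sketch of
 * a hypothetical HM exit handler forwarding a 'rep outsb' with 16-bit
 * addressing, DS as the effective segment and an already checked I/O port.
 * The instruction length would normally come from the exit information. */
#if 0
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif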
14232
14233/**
14234 * Interface for HM and EM for executing string I/O IN (read) instructions.
14235 *
14236 * This API ASSUMES that the caller has already verified that the guest code is
14237 * allowed to access the I/O port. (The I/O port is in the DX register in the
14238 * guest state.)
14239 *
14240 * @returns Strict VBox status code.
14241 * @param pVCpu The cross context virtual CPU structure.
14242 * @param cbValue The size of the I/O port access (1, 2, or 4).
14243 * @param enmAddrMode The addressing mode.
14244 * @param fRepPrefix Indicates whether a repeat prefix is used
14245 * (doesn't matter which for this instruction).
14246 * @param cbInstr The instruction length in bytes.
14247 * @param fIoChecked Whether the access to the I/O port has been
14248 * checked or not. It's typically checked in the
14249 * HM scenario.
14250 */
14251VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14252 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14253{
14254 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14255
14256 /*
14257 * State init.
14258 */
14259 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14260
14261 /*
14262 * Switch orgy for getting to the right handler.
14263 */
14264 VBOXSTRICTRC rcStrict;
14265 if (fRepPrefix)
14266 {
14267 switch (enmAddrMode)
14268 {
14269 case IEMMODE_16BIT:
14270 switch (cbValue)
14271 {
14272 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14273 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14274 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14275 default:
14276 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14277 }
14278 break;
14279
14280 case IEMMODE_32BIT:
14281 switch (cbValue)
14282 {
14283 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14284 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14285 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14286 default:
14287 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14288 }
14289 break;
14290
14291 case IEMMODE_64BIT:
14292 switch (cbValue)
14293 {
14294 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14295 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14296 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14297 default:
14298 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14299 }
14300 break;
14301
14302 default:
14303 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14304 }
14305 }
14306 else
14307 {
14308 switch (enmAddrMode)
14309 {
14310 case IEMMODE_16BIT:
14311 switch (cbValue)
14312 {
14313 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14314 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14315 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14316 default:
14317 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14318 }
14319 break;
14320
14321 case IEMMODE_32BIT:
14322 switch (cbValue)
14323 {
14324 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14325 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14326 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14327 default:
14328 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14329 }
14330 break;
14331
14332 case IEMMODE_64BIT:
14333 switch (cbValue)
14334 {
14335 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14336 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14337 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14338 default:
14339 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14340 }
14341 break;
14342
14343 default:
14344 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14345 }
14346 }
14347
14348 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14349}
14350
14351
14352/**
14353 * Interface for raw-mode to execute an OUT (port write) instruction.
14354 *
14355 * @returns Strict VBox status code.
14356 * @param pVCpu The cross context virtual CPU structure.
14357 * @param cbInstr The instruction length in bytes.
14358 * @param u16Port The port to write to.
14359 * @param cbReg The register size.
14360 *
14361 * @remarks In ring-0 not all of the state needs to be synced in.
14362 */
14363VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14364{
14365 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14366 Assert(cbReg <= 4 && cbReg != 3);
14367
14368 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14369 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14370 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14371}
14372
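/* Editor's illustration (not part of the original source): a minimal sketch of
 * forwarding an intercepted 'out dx, al' (opcode 0xEE, one byte) to IEM. */
#if 0
static VBOXSTRICTRC exampleForwardOutDxAl(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, (uint16_t)pCtx->rdx, 1 /*cbReg*/);
}
#endif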
14373
14374/**
14375 * Interface for raw-mode to execute an IN (port read) instruction.
14376 *
14377 * @returns Strict VBox status code.
14378 * @param pVCpu The cross context virtual CPU structure.
14379 * @param cbInstr The instruction length in bytes.
14380 * @param u16Port The port to read.
14381 * @param cbReg The register size.
14382 */
14383VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14384{
14385 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14386 Assert(cbReg <= 4 && cbReg != 3);
14387
14388 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14389 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14390 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14391}
14392
14393
14394/**
14395 * Interface for HM and EM to write to a CRx register.
14396 *
14397 * @returns Strict VBox status code.
14398 * @param pVCpu The cross context virtual CPU structure.
14399 * @param cbInstr The instruction length in bytes.
14400 * @param iCrReg The control register number (destination).
14401 * @param iGReg The general purpose register number (source).
14402 *
14403 * @remarks In ring-0 not all of the state needs to be synced in.
14404 */
14405VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14406{
14407 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14408 Assert(iCrReg < 16);
14409 Assert(iGReg < 16);
14410
14411 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14412 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14413 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14414}
14415
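/* Editor's illustration (not part of the original source): a minimal sketch of
 * forwarding an intercepted 'mov cr3, rax' (0F 22 D8, three bytes) to IEM. */
#if 0
static VBOXSTRICTRC exampleForwardMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, X86_GREG_xAX);
}
#endif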
14416
14417/**
14418 * Interface for HM and EM to read from a CRx register.
14419 *
14420 * @returns Strict VBox status code.
14421 * @param pVCpu The cross context virtual CPU structure.
14422 * @param cbInstr The instruction length in bytes.
14423 * @param iGReg The general purpose register number (destination).
14424 * @param iCrReg The control register number (source).
14425 *
14426 * @remarks In ring-0 not all of the state needs to be synced in.
14427 */
14428VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14429{
14430 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14431 Assert(iCrReg < 16);
14432 Assert(iGReg < 16);
14433
14434 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14435 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14436 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14437}
14438
14439
14440/**
14441 * Interface for HM and EM to clear the CR0[TS] bit.
14442 *
14443 * @returns Strict VBox status code.
14444 * @param pVCpu The cross context virtual CPU structure.
14445 * @param cbInstr The instruction length in bytes.
14446 *
14447 * @remarks In ring-0 not all of the state needs to be synced in.
14448 */
14449VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14450{
14451 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14452
14453 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14454 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14455 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14456}
14457
14458
14459/**
14460 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14461 *
14462 * @returns Strict VBox status code.
14463 * @param pVCpu The cross context virtual CPU structure.
14464 * @param cbInstr The instruction length in bytes.
14465 * @param uValue The value to load into CR0.
14466 *
14467 * @remarks In ring-0 not all of the state needs to be synced in.
14468 */
14469VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14470{
14471 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14472
14473 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14474 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14475 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14476}
14477
14478
14479/**
14480 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14481 *
14482 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14483 *
14484 * @returns Strict VBox status code.
14485 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14486 * @param cbInstr The instruction length in bytes.
14487 * @remarks In ring-0 not all of the state needs to be synced in.
14488 * @thread EMT(pVCpu)
14489 */
14490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14491{
14492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14493
14494 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14495 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14496 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14497}
14498
14499#ifdef IN_RING3
14500
14501/**
14502 * Handles the unlikely and probably fatal merge cases.
14503 *
14504 * @returns Merged status code.
14505 * @param rcStrict Current EM status code.
14506 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14507 * with @a rcStrict.
14508 * @param iMemMap The memory mapping index. For error reporting only.
14509 * @param pVCpu The cross context virtual CPU structure of the calling
14510 * thread, for error reporting only.
14511 */
14512DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14513 unsigned iMemMap, PVMCPU pVCpu)
14514{
14515 if (RT_FAILURE_NP(rcStrict))
14516 return rcStrict;
14517
14518 if (RT_FAILURE_NP(rcStrictCommit))
14519 return rcStrictCommit;
14520
14521 if (rcStrict == rcStrictCommit)
14522 return rcStrictCommit;
14523
14524 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14525 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14526 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14527 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14528 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14529 return VERR_IOM_FF_STATUS_IPE;
14530}
14531
14532
14533/**
14534 * Helper for IOMR3ProcessForceFlag.
14535 *
14536 * @returns Merged status code.
14537 * @param rcStrict Current EM status code.
14538 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14539 * with @a rcStrict.
14540 * @param iMemMap The memory mapping index. For error reporting only.
14541 * @param pVCpu The cross context virtual CPU structure of the calling
14542 * thread, for error reporting only.
14543 */
14544DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14545{
14546 /* Simple. */
14547 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14548 return rcStrictCommit;
14549
14550 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14551 return rcStrict;
14552
14553 /* EM scheduling status codes. */
14554 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14555 && rcStrict <= VINF_EM_LAST))
14556 {
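        /* If the commit status is also an EM scheduling code, keep the numerically
           smaller of the two; by EM convention a lower value means a higher
           priority scheduling request. */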
14557 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14558 && rcStrictCommit <= VINF_EM_LAST))
14559 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14560 }
14561
14562 /* Unlikely */
14563 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14564}
14565
14566
14567/**
14568 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14569 *
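 * This commits the pending bounce-buffer writes (typically MMIO or other
 * writes that could not be completed in ring-0 or raw-mode context) to guest
 * physical memory via PGMPhysWrite and merges the commit status into
 * @a rcStrict.
 *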
14570 * @returns Merge between @a rcStrict and what the commit operation returned.
14571 * @param pVM The cross context VM structure.
14572 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14573 * @param rcStrict The status code returned by ring-0 or raw-mode.
14574 */
14575VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14576{
14577 /*
14578 * Reset the pending commit.
14579 */
14580 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14581 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14582 ("%#x %#x %#x\n",
14583 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14584 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14585
14586 /*
14587 * Commit the pending bounce buffers (usually just one).
14588 */
14589 unsigned cBufs = 0;
14590 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14591 while (iMemMap-- > 0)
14592 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14593 {
14594 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14595 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14596 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14597
14598 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14599 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14600 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14601
14602 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14603 {
14604 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14605 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14606 pbBuf,
14607 cbFirst,
14608 PGMACCESSORIGIN_IEM);
14609 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14610 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14611 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14612 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14613 }
14614
14615 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14616 {
14617 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14618 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14619 pbBuf + cbFirst,
14620 cbSecond,
14621 PGMACCESSORIGIN_IEM);
14622 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14623 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14624 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14625 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14626 }
14627 cBufs++;
14628 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14629 }
14630
14631 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14632 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14633 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14634 pVCpu->iem.s.cActiveMappings = 0;
14635 return rcStrict;
14636}
14637
14638#endif /* IN_RING3 */
14639