VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 62251
Last change on this file since 62251 was 62246, checked in by vboxsync, 9 years ago: iemMapLookup: Duh!
1/* $Id: IEMAll.cpp 62246 2016-07-14 11:52:13Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
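For orientation, a hedged sketch of how these levels map onto the IPRT logging macros used throughout this file; uSel, uRip, GCPtrMem and cbMem are hypothetical locals, not taken from a specific call site.

/* Illustration only:
       Log(("IEM: raising #GP(0)\n"));                               - level 1: major events
       LogFlow(("IEMExecOne: enter\n"));                             - flow: enter/exit state
       Log4(("decode - %04x:%08RX64 xor eax,eax\n", uSel, uRip));    - level 4: mnemonics w/ EIP
       Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));              - level 8: memory writes   */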
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84//#define IEM_WITH_CODE_TLB - work in progress
85
86
87/*********************************************************************************************************************************
88* Header Files *
89*********************************************************************************************************************************/
90#define LOG_GROUP LOG_GROUP_IEM
91#define VMCPU_INCL_CPUM_GST_CTX
92#include <VBox/vmm/iem.h>
93#include <VBox/vmm/cpum.h>
94#include <VBox/vmm/pdm.h>
95#include <VBox/vmm/pgm.h>
96#include <internal/pgm.h>
97#include <VBox/vmm/iom.h>
98#include <VBox/vmm/em.h>
99#include <VBox/vmm/hm.h>
100#include <VBox/vmm/tm.h>
101#include <VBox/vmm/dbgf.h>
102#include <VBox/vmm/dbgftrace.h>
103#ifdef VBOX_WITH_RAW_MODE_NOT_R0
104# include <VBox/vmm/patm.h>
105# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
106# include <VBox/vmm/csam.h>
107# endif
108#endif
109#include "IEMInternal.h"
110#ifdef IEM_VERIFICATION_MODE_FULL
111# include <VBox/vmm/rem.h>
112# include <VBox/vmm/mm.h>
113#endif
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
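To make the indirection concrete, here is a minimal, hypothetical decoder stub built on these macros; iemOp_example_stub is invented for illustration and is not part of the real opcode tables.

/** Hypothetical opcode decoder stub -- illustration only, not wired into any table. */
FNIEMOP_DEF(iemOp_example_stub)
{
    /* A real decoder fetches any remaining opcode/operand bytes here and then
       dispatches to a C worker or one of the assembly helper tables below. */
    NOREF(pVCpu);
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
/* A dispatch site then expands to a plain indirect call, e.g.:
       return FNIEMOP_CALL(g_apfnOneByteMap[b]);                                      */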
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211
212/*********************************************************************************************************************************
213* Defined Constants And Macros *
214*********************************************************************************************************************************/
215/** @def IEM_WITH_SETJMP
216 * Enables alternative status code handling using setjmps.
217 *
218 * This adds a bit of expense via the setjmp() call since it saves all the
219 * non-volatile registers. However, it eliminates return code checks and allows
220 * for more optimal return value passing (return regs instead of stack buffer).
221 */
222#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
223# define IEM_WITH_SETJMP
224#endif
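As a rough sketch of what the two status handling styles look like at a call site; u32Val and GCPtrMem are placeholders, and the setjmp variant relies on the *Jmp raise/fetch helpers declared further down.

/* Status-code style -- every fetch is checked and the status propagated:
       uint32_t     u32Val;
       VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, X86_SREG_DS, GCPtrMem);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
   Setjmp style (IEM_WITH_SETJMP) -- the corresponding *Jmp fetcher longjmps on
   failure, so the call site simply consumes the value and the outer setjmp
   frame catches the status code. */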
225
226/** Temporary hack to disable the double execution. Will be removed in favor
227 * of a dedicated execution mode in EM. */
228//#define IEM_VERIFICATION_MODE_NO_REM
229
230/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
231 * due to GCC lacking knowledge about the value range of a switch. */
232#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
233
234/**
235 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
236 * occasion.
237 */
238#ifdef LOG_ENABLED
239# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
240 do { \
241 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
242 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
243 } while (0)
244#else
245# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
246 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
247#endif
248
249/**
250 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
251 * occasion using the supplied logger statement.
252 *
253 * @param a_LoggerArgs What to log on failure.
254 */
255#ifdef LOG_ENABLED
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
257 do { \
258 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
259 /*LogFunc(a_LoggerArgs);*/ \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
261 } while (0)
262#else
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
265#endif
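A hedged usage sketch; the message text and bRm are illustrative rather than taken from an actual call site.

/* Inside a decoder function an unimplemented encoding typically bails out like this:
       IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unimplemented encoding: bRm=%#x\n", bRm));
   Note the double parentheses: the whole argument list is forwarded to LogAlways(). */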
266
267/**
268 * Call an opcode decoder function.
269 *
270 * We're using macros for this so that adding and removing parameters can be
271 * done as we please. See FNIEMOP_DEF.
272 */
273#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
274
275/**
276 * Call a common opcode decoder function taking one extra argument.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF_1.
280 */
281#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
282
283/**
284 * Call a common opcode decoder function taking two extra arguments.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_2.
288 */
289#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
290
291/**
292 * Check if we're currently executing in real or virtual 8086 mode.
293 *
294 * @returns @c true if it is, @c false if not.
295 * @param a_pVCpu The IEM state of the current CPU.
296 */
297#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
298
299/**
300 * Check if we're currently executing in virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
304 */
305#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in long mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in real mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
325 * @returns PCCPUMFEATURES
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
336
337/**
338 * Evaluates to true if we're presenting an Intel CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
341
342/**
343 * Evaluates to true if we're presenting an AMD CPU to the guest.
344 */
345#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
346
347/**
348 * Check if the address is canonical.
349 */
350#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
351
352/** @def IEM_USE_UNALIGNED_DATA_ACCESS
353 * Use unaligned accesses instead of elaborate byte assembly. */
354#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
355# define IEM_USE_UNALIGNED_DATA_ACCESS
356#endif
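As a rough sketch of what this switch buys the memory helpers further down; pbSrc is a hypothetical source pointer, not code from this file.

/* With IEM_USE_UNALIGNED_DATA_ACCESS a 32-bit fetch can simply dereference:
       uint32_t const u32 = *(uint32_t const *)pbSrc;
   whereas without it the value is assembled byte by byte, e.g.:
       uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]); */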
357
358
359/*********************************************************************************************************************************
360* Global Variables *
361*********************************************************************************************************************************/
362extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
363
364
365/** Function table for the ADD instruction. */
366IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
367{
368 iemAImpl_add_u8, iemAImpl_add_u8_locked,
369 iemAImpl_add_u16, iemAImpl_add_u16_locked,
370 iemAImpl_add_u32, iemAImpl_add_u32_locked,
371 iemAImpl_add_u64, iemAImpl_add_u64_locked
372};
373
374/** Function table for the ADC instruction. */
375IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
376{
377 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
378 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
379 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
380 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
381};
382
383/** Function table for the SUB instruction. */
384IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
385{
386 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
387 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
388 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
389 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
390};
391
392/** Function table for the SBB instruction. */
393IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
394{
395 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
396 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
397 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
398 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
399};
400
401/** Function table for the OR instruction. */
402IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
403{
404 iemAImpl_or_u8, iemAImpl_or_u8_locked,
405 iemAImpl_or_u16, iemAImpl_or_u16_locked,
406 iemAImpl_or_u32, iemAImpl_or_u32_locked,
407 iemAImpl_or_u64, iemAImpl_or_u64_locked
408};
409
410/** Function table for the XOR instruction. */
411IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
412{
413 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
414 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
415 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
416 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
417};
418
419/** Function table for the AND instruction. */
420IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
421{
422 iemAImpl_and_u8, iemAImpl_and_u8_locked,
423 iemAImpl_and_u16, iemAImpl_and_u16_locked,
424 iemAImpl_and_u32, iemAImpl_and_u32_locked,
425 iemAImpl_and_u64, iemAImpl_and_u64_locked
426};
427
428/** Function table for the CMP instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
432{
433 iemAImpl_cmp_u8, NULL,
434 iemAImpl_cmp_u16, NULL,
435 iemAImpl_cmp_u32, NULL,
436 iemAImpl_cmp_u64, NULL
437};
438
439/** Function table for the TEST instruction.
440 * @remarks Making operand order ASSUMPTIONS.
441 */
442IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
443{
444 iemAImpl_test_u8, NULL,
445 iemAImpl_test_u16, NULL,
446 iemAImpl_test_u32, NULL,
447 iemAImpl_test_u64, NULL
448};
449
450/** Function table for the BT instruction. */
451IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
452{
453 NULL, NULL,
454 iemAImpl_bt_u16, NULL,
455 iemAImpl_bt_u32, NULL,
456 iemAImpl_bt_u64, NULL
457};
458
459/** Function table for the BTC instruction. */
460IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
461{
462 NULL, NULL,
463 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
464 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
465 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
466};
467
468/** Function table for the BTR instruction. */
469IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
470{
471 NULL, NULL,
472 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
473 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
474 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
475};
476
477/** Function table for the BTS instruction. */
478IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
479{
480 NULL, NULL,
481 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
482 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
483 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
484};
485
486/** Function table for the BSF instruction. */
487IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
488{
489 NULL, NULL,
490 iemAImpl_bsf_u16, NULL,
491 iemAImpl_bsf_u32, NULL,
492 iemAImpl_bsf_u64, NULL
493};
494
495/** Function table for the BSR instruction. */
496IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
497{
498 NULL, NULL,
499 iemAImpl_bsr_u16, NULL,
500 iemAImpl_bsr_u32, NULL,
501 iemAImpl_bsr_u64, NULL
502};
503
504/** Function table for the IMUL instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
506{
507 NULL, NULL,
508 iemAImpl_imul_two_u16, NULL,
509 iemAImpl_imul_two_u32, NULL,
510 iemAImpl_imul_two_u64, NULL
511};
512
513/** Group 1 /r lookup table. */
514IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
515{
516 &g_iemAImpl_add,
517 &g_iemAImpl_or,
518 &g_iemAImpl_adc,
519 &g_iemAImpl_sbb,
520 &g_iemAImpl_and,
521 &g_iemAImpl_sub,
522 &g_iemAImpl_xor,
523 &g_iemAImpl_cmp
524};
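A minimal sketch of how a group-1 decoder picks its worker from this table via the ModR/M reg field; iemOp_Grp1_sketch is hypothetical, the real dispatch lives in the instruction decoder includes.

/** Hypothetical group-1 dispatch sketch -- illustration only. */
FNIEMOPRM_DEF(iemOp_Grp1_sketch)
{
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
    NOREF(pImpl); NOREF(pVCpu); /* operand decoding and the actual worker call are omitted */
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}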
525
526/** Function table for the INC instruction. */
527IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
528{
529 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
530 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
531 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
532 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
533};
534
535/** Function table for the DEC instruction. */
536IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
537{
538 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
539 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
540 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
541 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
542};
543
544/** Function table for the NEG instruction. */
545IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
546{
547 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
548 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
549 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
550 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
551};
552
553/** Function table for the NOT instruction. */
554IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
555{
556 iemAImpl_not_u8, iemAImpl_not_u8_locked,
557 iemAImpl_not_u16, iemAImpl_not_u16_locked,
558 iemAImpl_not_u32, iemAImpl_not_u32_locked,
559 iemAImpl_not_u64, iemAImpl_not_u64_locked
560};
561
562
563/** Function table for the ROL instruction. */
564IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
565{
566 iemAImpl_rol_u8,
567 iemAImpl_rol_u16,
568 iemAImpl_rol_u32,
569 iemAImpl_rol_u64
570};
571
572/** Function table for the ROR instruction. */
573IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
574{
575 iemAImpl_ror_u8,
576 iemAImpl_ror_u16,
577 iemAImpl_ror_u32,
578 iemAImpl_ror_u64
579};
580
581/** Function table for the RCL instruction. */
582IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
583{
584 iemAImpl_rcl_u8,
585 iemAImpl_rcl_u16,
586 iemAImpl_rcl_u32,
587 iemAImpl_rcl_u64
588};
589
590/** Function table for the RCR instruction. */
591IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
592{
593 iemAImpl_rcr_u8,
594 iemAImpl_rcr_u16,
595 iemAImpl_rcr_u32,
596 iemAImpl_rcr_u64
597};
598
599/** Function table for the SHL instruction. */
600IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
601{
602 iemAImpl_shl_u8,
603 iemAImpl_shl_u16,
604 iemAImpl_shl_u32,
605 iemAImpl_shl_u64
606};
607
608/** Function table for the SHR instruction. */
609IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
610{
611 iemAImpl_shr_u8,
612 iemAImpl_shr_u16,
613 iemAImpl_shr_u32,
614 iemAImpl_shr_u64
615};
616
617/** Function table for the SAR instruction. */
618IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
619{
620 iemAImpl_sar_u8,
621 iemAImpl_sar_u16,
622 iemAImpl_sar_u32,
623 iemAImpl_sar_u64
624};
625
626
627/** Function table for the MUL instruction. */
628IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
629{
630 iemAImpl_mul_u8,
631 iemAImpl_mul_u16,
632 iemAImpl_mul_u32,
633 iemAImpl_mul_u64
634};
635
636/** Function table for the IMUL instruction working implicitly on rAX. */
637IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
638{
639 iemAImpl_imul_u8,
640 iemAImpl_imul_u16,
641 iemAImpl_imul_u32,
642 iemAImpl_imul_u64
643};
644
645/** Function table for the DIV instruction. */
646IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
647{
648 iemAImpl_div_u8,
649 iemAImpl_div_u16,
650 iemAImpl_div_u32,
651 iemAImpl_div_u64
652};
653
654/** Function table for the IDIV instruction. */
655IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
656{
657 iemAImpl_idiv_u8,
658 iemAImpl_idiv_u16,
659 iemAImpl_idiv_u32,
660 iemAImpl_idiv_u64
661};
662
663/** Function table for the SHLD instruction */
664IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
665{
666 iemAImpl_shld_u16,
667 iemAImpl_shld_u32,
668 iemAImpl_shld_u64,
669};
670
671/** Function table for the SHRD instruction */
672IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
673{
674 iemAImpl_shrd_u16,
675 iemAImpl_shrd_u32,
676 iemAImpl_shrd_u64,
677};
678
679
680/** Function table for the PUNPCKLBW instruction */
681IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
682/** Function table for the PUNPCKLWD instruction */
683IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
684/** Function table for the PUNPCKLDQ instruction */
685IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
686/** Function table for the PUNPCKLQDQ instruction */
687IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
688
689/** Function table for the PUNPCKHBW instruction */
690IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
691/** Function table for the PUNPCKHWD instruction */
692IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
693/** Function table for the PUNPCKHDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
695/** Function table for the PUNPCKHQDQ instruction */
696IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
697
698/** Function table for the PXOR instruction */
699IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
700/** Function table for the PCMPEQB instruction */
701IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
702/** Function table for the PCMPEQW instruction */
703IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
704/** Function table for the PCMPEQD instruction */
705IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
706
707
708#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
709/** What IEM just wrote. */
710uint8_t g_abIemWrote[256];
711/** How much IEM just wrote. */
712size_t g_cbIemWrote;
713#endif
714
715
716/*********************************************************************************************************************************
717* Internal Functions *
718*********************************************************************************************************************************/
719IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
720IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
721IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
722IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
723/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
724IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
725IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
726IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
727IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
728IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
729IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
730IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
733IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
734IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
735IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
736#ifdef IEM_WITH_SETJMP
737DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
740DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741#endif
742
743IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
744IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
745IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
747IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
748IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
750IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
751IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
752IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
753IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
754IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
755IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
756IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
757IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
758IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
759
760#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
761IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
762#endif
763IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
764IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
765
766
767
768/**
769 * Sets the pass up status.
770 *
771 * @returns VINF_SUCCESS.
772 * @param pVCpu The cross context virtual CPU structure of the
773 * calling thread.
774 * @param rcPassUp The pass up status. Must be informational.
775 * VINF_SUCCESS is not allowed.
776 */
777IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
778{
779 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
780
781 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
782 if (rcOldPassUp == VINF_SUCCESS)
783 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
784 /* If both are EM scheduling codes, use EM priority rules. */
785 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
786 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
787 {
788 if (rcPassUp < rcOldPassUp)
789 {
790 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 }
793 else
794 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
795 }
796 /* Override EM scheduling with specific status code. */
797 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
798 {
799 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
800 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
801 }
802 /* Don't override specific status code, first come first served. */
803 else
804 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
805 return VINF_SUCCESS;
806}
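For context, this is how the opcode prefetch code later in this file merges an informational status from PGM (see iemInitDecoderAndPrefetchOpcodes):

/*     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
           rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);                             */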
807
808
809/**
810 * Calculates the CPU mode.
811 *
812 * This is mainly for updating IEMCPU::enmCpuMode.
813 *
814 * @returns CPU mode.
815 * @param pCtx The register context for the CPU.
816 */
817DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
818{
819 if (CPUMIsGuestIn64BitCodeEx(pCtx))
820 return IEMMODE_64BIT;
821 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
822 return IEMMODE_32BIT;
823 return IEMMODE_16BIT;
824}
825
826
827/**
828 * Initializes the execution state.
829 *
830 * @param pVCpu The cross context virtual CPU structure of the
831 * calling thread.
832 * @param fBypassHandlers Whether to bypass access handlers.
833 *
834 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
835 * side-effects in strict builds.
836 */
837DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
838{
839 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
840
841 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
842
843#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
852#endif
853
854#ifdef VBOX_WITH_RAW_MODE_NOT_R0
855 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
856#endif
857 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
858 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
859#ifdef VBOX_STRICT
860 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
861 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
862 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
863 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
864 pVCpu->iem.s.fPrefixes = (IEMMODE)0xfeedbeef;
865 pVCpu->iem.s.uRexReg = 127;
866 pVCpu->iem.s.uRexB = 127;
867 pVCpu->iem.s.uRexIndex = 127;
868 pVCpu->iem.s.iEffSeg = 127;
869 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
870# ifdef IEM_WITH_CODE_TLB
871 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
872 pVCpu->iem.s.pbInstrBuf = NULL;
873 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
874 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
875 pVCpu->iem.s.offCurInstrStart = UINT16_MAX;
876 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
877# else
878 pVCpu->iem.s.offOpcode = 127;
879 pVCpu->iem.s.cbOpcode = 127;
880# endif
881#endif
882
883 pVCpu->iem.s.cActiveMappings = 0;
884 pVCpu->iem.s.iNextMapping = 0;
885 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
886 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
887#ifdef VBOX_WITH_RAW_MODE_NOT_R0
888 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
889 && pCtx->cs.u64Base == 0
890 && pCtx->cs.u32Limit == UINT32_MAX
891 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
892 if (!pVCpu->iem.s.fInPatchCode)
893 CPUMRawLeave(pVCpu, VINF_SUCCESS);
894#endif
895
896#ifdef IEM_VERIFICATION_MODE_FULL
897 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
898 pVCpu->iem.s.fNoRem = true;
899#endif
900}
901
902
903/**
904 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
905 *
906 * @param pVCpu The cross context virtual CPU structure of the
907 * calling thread.
908 */
909DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
910{
911 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
912#ifdef IEM_VERIFICATION_MODE_FULL
913 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
914#endif
915#ifdef VBOX_STRICT
916# ifdef IEM_WITH_CODE_TLB
917# else
918 pVCpu->iem.s.cbOpcode = 0;
919# endif
920#else
921 NOREF(pVCpu);
922#endif
923}
924
925
926/**
927 * Initializes the decoder state.
928 *
929 * iemReInitDecoder is mostly a copy of this function.
930 *
931 * @param pVCpu The cross context virtual CPU structure of the
932 * calling thread.
933 * @param fBypassHandlers Whether to bypass access handlers.
934 */
935DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
936{
937 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
938
939 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
940
941#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
950#endif
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
954#endif
955 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
956#ifdef IEM_VERIFICATION_MODE_FULL
957 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
958 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
959#endif
960 IEMMODE enmMode = iemCalcCpuMode(pCtx);
961 pVCpu->iem.s.enmCpuMode = enmMode;
962 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
963 pVCpu->iem.s.enmEffAddrMode = enmMode;
964 if (enmMode != IEMMODE_64BIT)
965 {
966 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
967 pVCpu->iem.s.enmEffOpSize = enmMode;
968 }
969 else
970 {
971 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
972 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
973 }
974 pVCpu->iem.s.fPrefixes = 0;
975 pVCpu->iem.s.uRexReg = 0;
976 pVCpu->iem.s.uRexB = 0;
977 pVCpu->iem.s.uRexIndex = 0;
978 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
979#ifdef IEM_WITH_CODE_TLB
980 pVCpu->iem.s.pbInstrBuf = NULL;
981 pVCpu->iem.s.offInstrNextByte = 0;
982 pVCpu->iem.s.offCurInstrStart = 0;
983# ifdef VBOX_STRICT
984 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
985 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
986 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
987# endif
988#else
989 pVCpu->iem.s.offOpcode = 0;
990 pVCpu->iem.s.cbOpcode = 0;
991#endif
992 pVCpu->iem.s.cActiveMappings = 0;
993 pVCpu->iem.s.iNextMapping = 0;
994 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
995 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
996#ifdef VBOX_WITH_RAW_MODE_NOT_R0
997 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
998 && pCtx->cs.u64Base == 0
999 && pCtx->cs.u32Limit == UINT32_MAX
1000 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1001 if (!pVCpu->iem.s.fInPatchCode)
1002 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1003#endif
1004
1005#ifdef DBGFTRACE_ENABLED
1006 switch (enmMode)
1007 {
1008 case IEMMODE_64BIT:
1009 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1010 break;
1011 case IEMMODE_32BIT:
1012 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1013 break;
1014 case IEMMODE_16BIT:
1015 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1016 break;
1017 }
1018#endif
1019}
1020
1021
1022/**
1023 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1024 *
1025 * This is mostly a copy of iemInitDecoder.
1026 *
1027 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1028 */
1029DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1030{
1031 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1032
1033 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1034
1035#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1043 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1044#endif
1045
1046 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1047#ifdef IEM_VERIFICATION_MODE_FULL
1048 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1049 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1050#endif
1051 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1052 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1053 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1054 pVCpu->iem.s.enmEffAddrMode = enmMode;
1055 if (enmMode != IEMMODE_64BIT)
1056 {
1057 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1058 pVCpu->iem.s.enmEffOpSize = enmMode;
1059 }
1060 else
1061 {
1062 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1063 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1064 }
1065 pVCpu->iem.s.fPrefixes = 0;
1066 pVCpu->iem.s.uRexReg = 0;
1067 pVCpu->iem.s.uRexB = 0;
1068 pVCpu->iem.s.uRexIndex = 0;
1069 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1070#ifdef IEM_WITH_CODE_TLB
1071 if (pVCpu->iem.s.pbInstrBuf)
1072 {
1073 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1074 - pVCpu->iem.s.uInstrBufPc;
1075 if (off < pVCpu->iem.s.cbInstrBufTotal)
1076 {
1077 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1078 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1079 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1080 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1081 else
1082 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1083 }
1084 else
1085 {
1086 pVCpu->iem.s.pbInstrBuf = NULL;
1087 pVCpu->iem.s.offInstrNextByte = 0;
1088 pVCpu->iem.s.offCurInstrStart = 0;
1089 }
1090 }
1091 else
1092 {
1093 pVCpu->iem.s.offInstrNextByte = 0;
1094 pVCpu->iem.s.offCurInstrStart = 0;
1095 }
1096#else
1097 pVCpu->iem.s.cbOpcode = 0;
1098 pVCpu->iem.s.offOpcode = 0;
1099#endif
1100 Assert(pVCpu->iem.s.cActiveMappings == 0);
1101 pVCpu->iem.s.iNextMapping = 0;
1102 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1103 Assert(pVCpu->iem.s.fBypassHandlers == false);
1104#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1105 if (!pVCpu->iem.s.fInPatchCode)
1106 { /* likely */ }
1107 else
1108 {
1109 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1110 && pCtx->cs.u64Base == 0
1111 && pCtx->cs.u32Limit == UINT32_MAX
1112 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1113 if (!pVCpu->iem.s.fInPatchCode)
1114 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1115 }
1116#endif
1117
1118#ifdef DBGFTRACE_ENABLED
1119 switch (enmMode)
1120 {
1121 case IEMMODE_64BIT:
1122 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1123 break;
1124 case IEMMODE_32BIT:
1125 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1126 break;
1127 case IEMMODE_16BIT:
1128 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1129 break;
1130 }
1131#endif
1132}
1133
1134
1135
1136/**
1137 * Prefetches the opcodes when first starting execution.
1138 *
1139 * @returns Strict VBox status code.
1140 * @param pVCpu The cross context virtual CPU structure of the
1141 * calling thread.
1142 * @param fBypassHandlers Whether to bypass access handlers.
1143 */
1144IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1145{
1146#ifdef IEM_VERIFICATION_MODE_FULL
1147 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1148#endif
1149 iemInitDecoder(pVCpu, fBypassHandlers);
1150
1151#ifdef IEM_WITH_CODE_TLB
1152 /** @todo Do ITLB lookup here. */
1153
1154#else /* !IEM_WITH_CODE_TLB */
1155
1156 /*
1157 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1158 *
1159 * First translate CS:rIP to a physical address.
1160 */
1161 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1162 uint32_t cbToTryRead;
1163 RTGCPTR GCPtrPC;
1164 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1165 {
1166 cbToTryRead = PAGE_SIZE;
1167 GCPtrPC = pCtx->rip;
1168 if (!IEM_IS_CANONICAL(GCPtrPC))
1169 return iemRaiseGeneralProtectionFault0(pVCpu);
1170 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1171 }
1172 else
1173 {
1174 uint32_t GCPtrPC32 = pCtx->eip;
1175 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1176 if (GCPtrPC32 > pCtx->cs.u32Limit)
1177 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1178 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1179 if (!cbToTryRead) /* overflowed */
1180 {
1181 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1182 cbToTryRead = UINT32_MAX;
1183 }
1184 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1185 Assert(GCPtrPC <= UINT32_MAX);
1186 }
1187
1188# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1189 /* Allow interpretation of patch manager code blocks since they can for
1190 instance throw #PFs for perfectly good reasons. */
1191 if (pVCpu->iem.s.fInPatchCode)
1192 {
1193 size_t cbRead = 0;
1194 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1195 AssertRCReturn(rc, rc);
1196 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1197 return VINF_SUCCESS;
1198 }
1199# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1200
1201 RTGCPHYS GCPhys;
1202 uint64_t fFlags;
1203 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1204 if (RT_FAILURE(rc))
1205 {
1206 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1207 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1210 {
1211 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1212 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1213 }
1214 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1215 {
1216 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1217 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1218 }
1219 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1220 /** @todo Check reserved bits and such stuff. PGM is better at doing
1221 * that, so do it when implementing the guest virtual address
1222 * TLB... */
1223
1224# ifdef IEM_VERIFICATION_MODE_FULL
1225 /*
1226 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1227 * instruction.
1228 */
1229 /** @todo optimize this differently by not using PGMPhysRead. */
1230 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1231 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1232 if ( offPrevOpcodes < cbOldOpcodes
1233 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1234 {
1235 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1236 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1237 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1238 pVCpu->iem.s.cbOpcode = cbNew;
1239 return VINF_SUCCESS;
1240 }
1241# endif
1242
1243 /*
1244 * Read the bytes at this address.
1245 */
1246 PVM pVM = pVCpu->CTX_SUFF(pVM);
1247# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1248 size_t cbActual;
1249 if ( PATMIsEnabled(pVM)
1250 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1251 {
1252 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1253 Assert(cbActual > 0);
1254 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1255 }
1256 else
1257# endif
1258 {
1259 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1260 if (cbToTryRead > cbLeftOnPage)
1261 cbToTryRead = cbLeftOnPage;
1262 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1263 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1264
1265 if (!pVCpu->iem.s.fBypassHandlers)
1266 {
1267 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1268 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1269 { /* likely */ }
1270 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1271 {
1272 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1273 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1274 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1275 }
1276 else
1277 {
1278 Log((RT_SUCCESS(rcStrict)
1279 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1280 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1281 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1282 return rcStrict;
1283 }
1284 }
1285 else
1286 {
1287 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1288 if (RT_SUCCESS(rc))
1289 { /* likely */ }
1290 else
1291 {
1292 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1293 GCPtrPC, GCPhys, cbToTryRead, rc));
1294 return rc;
1295 }
1296 }
1297 pVCpu->iem.s.cbOpcode = cbToTryRead;
1298 }
1299#endif /* !IEM_WITH_CODE_TLB */
1300 return VINF_SUCCESS;
1301}
1302
1303
1304/**
1305 * Invalidates the IEM TLBs.
1306 *
1307 * This is called internally as well as by PGM when moving GC mappings.
1308 *
1309 *
1310 * @param pVCpu The cross context virtual CPU structure of the calling
1311 * thread.
1312 * @param fVmm Set when PGM calls us with a remapping.
1313 */
1314void IEMInvalidTLBs(PVMCPU pVCpu, bool fVmm)
1315{
1316#ifdef IEM_WITH_CODE_TLB
1317 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1318 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1319 { /* very likely */ }
1320 else
1321 {
1322 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1323 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1324 while (i-- > 0)
1325 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1326 }
1327#endif
1328
1329#ifdef IEM_WITH_DATA_TLB
1330 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1331 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1332 { /* very likely */ }
1333 else
1334 {
1335 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1336 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1337 while (i-- > 0)
1338 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1339 }
1340#endif
1341 NOREF(pVCpu); NOREF(fVmm);
1342}
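To make the lazy invalidation explicit: lookups embed the current revision in the tag, so bumping uTlbRevision orphans every existing entry without touching the array. A sketch mirroring the code TLB lookup in iemOpcodeFetchBytesJmp below:

/*     uint64_t const uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
       PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
       if (pTlbe->uTag == uTag)
           ... hit: reuse the cached translation ...
       else
           ... miss: refill the entry via PGMGstGetPage ...                            */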
1343
1344
1345/**
1346 * Invalidates the host physical aspects of the IEM TLBs.
1347 *
1348 * This is called internally as well as by PGM when moving GC mappings.
1349 *
1350 * @param pVCpu The cross context virtual CPU structure of the calling
1351 * thread.
1352 * @param uTlbPhysRev The revision of the phys stuff.
1353 * @param fFullFlush Whether we're doing a full flush or not.
1354 */
1355void IEMInvalidTLBsHostPhys(PVMCPU pVCpu, uint64_t uTlbPhysRev, bool fFullFlush)
1356{
1357#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1358 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1359
1360 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1361 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1362
1363 if (!fFullFlush)
1364 { /* very likely */ }
1365 else
1366 {
1367 unsigned i;
1368# ifdef IEM_WITH_CODE_TLB
1369 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1370 while (i-- > 0)
1371 {
1372 pVCpu->iem.s.CodeTlb.aEntries[i].pMappingR3 = NULL;
1373 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1374 }
1375# endif
1376# ifdef IEM_WITH_DATA_TLB
1377 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1378 while (i-- > 0)
1379 {
1380 pVCpu->iem.s.DataTlb.aEntries[i].pMappingR3 = NULL;
1381 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1382 }
1383# endif
1384 }
1385#endif
1386 NOREF(pVCpu); NOREF(fFullFlush);
1387}
1388
1389
1390#ifdef IEM_WITH_CODE_TLB
1391
1392/**
1393 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1394 * failure and jumping out (longjmp).
1395 *
1396 * We end up here for a number of reasons:
1397 * - pbInstrBuf isn't yet initialized.
1398 * - Advancing beyond the buffer boundary (e.g. cross page).
1399 * - Advancing beyond the CS segment limit.
1400 * - Fetching from non-mappable page (e.g. MMIO).
1401 *
1402 * @param pVCpu The cross context virtual CPU structure of the
1403 * calling thread.
1404 * @param pvDst Where to return the bytes.
1405 * @param cbDst Number of bytes to read.
1406 *
1407 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1408 */
1409IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1410{
1411 Assert(cbDst <= 8);
1412 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1413
1414 /*
1415 * We might have a partial buffer match, deal with that first to make the
1416 * rest simpler. This is the first part of the cross page/buffer case.
1417 */
1418 if (pVCpu->iem.s.pbInstrBuf != NULL)
1419 {
1420 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1421 {
1422 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1423 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1424 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1425
1426 cbDst -= cbCopy;
1427 pvDst = (uint8_t *)pvDst + cbCopy;
1428 offBuf += cbCopy;
1429 pVCpu->iem.s.offInstrNextByte = offBuf;
1430 }
1431 }
1432
1433 /*
1434 * Check segment limit, figuring how much we're allowed to access at this point.
1435 */
1436 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1437 RTGCPTR GCPtrFirst;
1438 uint32_t cbMaxRead;
1439 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1440 {
1441 GCPtrFirst = pCtx->rip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1442 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1443 { /* likely */ }
1444 else
1445 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1446 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1447 }
1448 else
1449 {
1450 GCPtrFirst = pCtx->eip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1451 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1452 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1453 { /* likely */ }
1454 else
1455 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1456 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1457 if (cbMaxRead != 0)
1458 { /* likely */ }
1459 else
1460 {
1461 /* Overflowed because address is 0 and limit is max. */
1462 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1463 cbMaxRead = X86_PAGE_SIZE;
1464 }
1465 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1466 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1467 if (cbMaxRead2 < cbMaxRead)
1468 cbMaxRead = cbMaxRead2;
1469 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1470 }
1471
1472 /*
1473 * Get the TLB entry for this piece of code.
1474 */
1475 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1476 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1477 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1478 if (pTlbe->uTag == uTag)
1479 {
1480 /* likely when executing lots of code, otherwise unlikely */
1481# ifdef VBOX_WITH_STATISTICS
1482 pVCpu->iem.s.CodeTlb.cTlbHits++;
1483# endif
1484 }
1485 else
1486 {
1487 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1488 pVCpu->iem.s.CodeTlb.cTlbMissesTag++;
1489# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1490 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1491 {
1492 pTlbe->uTag = uTag;
1493 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1494 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1495 pTlbe->GCPhys = NIL_RTGCPHYS;
1496 pTlbe->pMappingR3 = NULL;
1497 }
1498 else
1499# endif
1500 {
1501 RTGCPHYS GCPhys;
1502 uint64_t fFlags;
1503 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1504 if (RT_FAILURE(rc))
1505 {
1506 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1507 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1508 }
1509
1510 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1511 pTlbe->uTag = uTag;
1512 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1513 pTlbe->GCPhys = GCPhys;
1514 pTlbe->pMappingR3 = NULL;
1515 }
1516 }
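
    /*
     * Note (illustrative): the tag is the page frame number of GCPtrFirst ORed
     * with the current uTlbRevision, and the entry index is just the low 8 bits
     * of that tag (256 entries).  For a made-up address like 0x7fff12345678 the
     * frame number is 0x7fff12345 and aEntries[0x45] is the slot consulted.
     */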
1517
1518 /*
1519 * Check TLB access flags.
1520 */
1521 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1522 {
1523 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1524 {
1525 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1526 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1527 }
1528 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1529 {
1530 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1531 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1532 }
1533 }
1534
1535# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1536 /*
1537 * Allow interpretation of patch manager code blocks since they can for
1538 * instance throw #PFs for perfectly good reasons.
1539 */
1540 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1541 { /* likely */ }
1542 else
1543 {
1544
1545 }
1546
1547# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1548
1549# if 0
1550
1551# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1552 /* Allow interpretation of patch manager code blocks since they can for
1553 instance throw #PFs for perfectly good reasons. */
1554 if (pVCpu->iem.s.fInPatchCode)
1555 {
1556 size_t cbRead = 0;
1557 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1558 AssertRCReturn(rc, rc);
1559 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1560 return VINF_SUCCESS;
1561 }
1562# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1563
1564 RTGCPHYS GCPhys;
1565 uint64_t fFlags;
1566 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1567 if (RT_FAILURE(rc))
1568 {
1569 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1570 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1571 }
1572 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1573 {
1574 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1575 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1576 }
1577 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1578 {
1579 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1580 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1581 }
1582 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1583 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1584 /** @todo Check reserved bits and such stuff. PGM is better at doing
1585 * that, so do it when implementing the guest virtual address
1586 * TLB... */
1587
1588 /*
1589 * Read the bytes at this address.
1590 *
1591 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1592 * and since PATM should only patch the start of an instruction there
1593 * should be no need to check again here.
1594 */
1595 if (!pVCpu->iem.s.fBypassHandlers)
1596 {
1597 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1598 cbToTryRead, PGMACCESSORIGIN_IEM);
1599 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1600 { /* likely */ }
1601 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1602 {
1603 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1604 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1605 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1606 }
1607 else
1608 {
1609 Log((RT_SUCCESS(rcStrict)
1610 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1611 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1612 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1613 return rcStrict;
1614 }
1615 }
1616 else
1617 {
1618 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1619 if (RT_SUCCESS(rc))
1620 { /* likely */ }
1621 else
1622 {
1623 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1624 return rc;
1625 }
1626 }
1627 pVCpu->iem.s.cbOpcode += cbToTryRead;
1628 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1629# endif
1630}
1631
1632#else
1633
1634/**
1635 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1636 * exception if it fails.
1637 *
1638 * @returns Strict VBox status code.
1639 * @param pVCpu The cross context virtual CPU structure of the
1640 * calling thread.
1641 * @param cbMin The minimum number of bytes, relative to offOpcode,
1642 * that must be read.
1643 */
1644IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1645{
1646 /*
1647 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1648 *
1649 * First translate CS:rIP to a physical address.
1650 */
1651 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1652 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1653 uint32_t cbToTryRead;
1654 RTGCPTR GCPtrNext;
1655 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1656 {
1657 cbToTryRead = PAGE_SIZE;
1658 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1659 if (!IEM_IS_CANONICAL(GCPtrNext))
1660 return iemRaiseGeneralProtectionFault0(pVCpu);
1661 }
1662 else
1663 {
1664 uint32_t GCPtrNext32 = pCtx->eip;
1665 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1666 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1667 if (GCPtrNext32 > pCtx->cs.u32Limit)
1668 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1669 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1670 if (!cbToTryRead) /* overflowed */
1671 {
1672 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1673 cbToTryRead = UINT32_MAX;
1674 /** @todo check out wrapping around the code segment. */
1675 }
1676 if (cbToTryRead < cbMin - cbLeft)
1677 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1678 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1679 }
1680
1681 /* Only read up to the end of the page, and make sure we don't read more
1682 than the opcode buffer can hold. */
1683 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1684 if (cbToTryRead > cbLeftOnPage)
1685 cbToTryRead = cbLeftOnPage;
1686 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1687 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1688/** @todo r=bird: Convert assertion into undefined opcode exception? */
1689 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
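
    /* Worked example (illustrative numbers): for GCPtrNext=0x1ffd the clamp
       gives cbLeftOnPage = 0x1000 - 0xffd = 3; with cbMin=2 and cbLeft=0 the
       assertion above (cbToTryRead >= cbMin - cbLeft, i.e. 3 >= 2) holds. */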
1690
1691# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1692 /* Allow interpretation of patch manager code blocks since they can for
1693 instance throw #PFs for perfectly good reasons. */
1694 if (pVCpu->iem.s.fInPatchCode)
1695 {
1696 size_t cbRead = 0;
1697 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1698 AssertRCReturn(rc, rc);
1699 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1700 return VINF_SUCCESS;
1701 }
1702# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1703
1704 RTGCPHYS GCPhys;
1705 uint64_t fFlags;
1706 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1707 if (RT_FAILURE(rc))
1708 {
1709 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1710 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1711 }
1712 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1713 {
1714 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1715 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1716 }
1717 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1718 {
1719 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1720 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1721 }
1722 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1723 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1724 /** @todo Check reserved bits and such stuff. PGM is better at doing
1725 * that, so do it when implementing the guest virtual address
1726 * TLB... */
1727
1728 /*
1729 * Read the bytes at this address.
1730 *
1731 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1732 * and since PATM should only patch the start of an instruction there
1733 * should be no need to check again here.
1734 */
1735 if (!pVCpu->iem.s.fBypassHandlers)
1736 {
1737 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1738 cbToTryRead, PGMACCESSORIGIN_IEM);
1739 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1740 { /* likely */ }
1741 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1742 {
1743 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1744 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1745 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1746 }
1747 else
1748 {
1749 Log((RT_SUCCESS(rcStrict)
1750 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1751 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1752 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1753 return rcStrict;
1754 }
1755 }
1756 else
1757 {
1758 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1759 if (RT_SUCCESS(rc))
1760 { /* likely */ }
1761 else
1762 {
1763 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1764 return rc;
1765 }
1766 }
1767 pVCpu->iem.s.cbOpcode += cbToTryRead;
1768 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1769
1770 return VINF_SUCCESS;
1771}
1772
1773#endif /* !IEM_WITH_CODE_TLB */
1774#ifndef IEM_WITH_SETJMP
1775
1776/**
1777 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1778 *
1779 * @returns Strict VBox status code.
1780 * @param pVCpu The cross context virtual CPU structure of the
1781 * calling thread.
1782 * @param pb Where to return the opcode byte.
1783 */
1784DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1785{
1786 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1787 if (rcStrict == VINF_SUCCESS)
1788 {
1789 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1790 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1791 pVCpu->iem.s.offOpcode = offOpcode + 1;
1792 }
1793 else
1794 *pb = 0;
1795 return rcStrict;
1796}
1797
1798
1799/**
1800 * Fetches the next opcode byte.
1801 *
1802 * @returns Strict VBox status code.
1803 * @param pVCpu The cross context virtual CPU structure of the
1804 * calling thread.
1805 * @param pu8 Where to return the opcode byte.
1806 */
1807DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1808{
1809 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1810 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1811 {
1812 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1813 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1814 return VINF_SUCCESS;
1815 }
1816 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1817}
1818
1819#else /* IEM_WITH_SETJMP */
1820
1821/**
1822 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1823 *
1824 * @returns The opcode byte.
1825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1826 */
1827DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1828{
1829# ifdef IEM_WITH_CODE_TLB
1830 uint8_t u8;
1831 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1832 return u8;
1833# else
1834 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1835 if (rcStrict == VINF_SUCCESS)
1836 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1837 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1838# endif
1839}
1840
1841
1842/**
1843 * Fetches the next opcode byte, longjmp on error.
1844 *
1845 * @returns The opcode byte.
1846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1847 */
1848DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1849{
1850# ifdef IEM_WITH_CODE_TLB
1851 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1852 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1853 if (RT_LIKELY( pbBuf != NULL
1854 && offBuf < pVCpu->iem.s.cbInstrBuf))
1855 {
1856 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1857 return pbBuf[offBuf];
1858 }
1859# else
1860 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1861 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1862 {
1863 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1864 return pVCpu->iem.s.abOpcode[offOpcode];
1865 }
1866# endif
1867 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1868}
1869
1870#endif /* IEM_WITH_SETJMP */
1871
1872/**
1873 * Fetches the next opcode byte, returns automatically on failure.
1874 *
1875 * @param a_pu8 Where to return the opcode byte.
1876 * @remark Implicitly references pVCpu.
1877 */
1878#ifndef IEM_WITH_SETJMP
1879# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1880 do \
1881 { \
1882 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
1883 if (rcStrict2 == VINF_SUCCESS) \
1884 { /* likely */ } \
1885 else \
1886 return rcStrict2; \
1887 } while (0)
1888#else
1889# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
1890#endif /* IEM_WITH_SETJMP */
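
#if 0
/* Illustrative sketch only, never built: shows the typical calling pattern for
   IEM_OPCODE_GET_NEXT_U8 in an opcode decoder.  The function name below is
   hypothetical; the real decoders are included from IEMAllInstructions.cpp.h. */
IEM_STATIC VBOXSTRICTRC iemOpExampleSketch_FetchByte(PVMCPU pVCpu, uint8_t *pbRm)
{
    /* In non-setjmp builds the macro returns the strict status code to the
       caller on failure; in setjmp builds it longjmps via iemOpcodeGetNextU8Jmp. */
    IEM_OPCODE_GET_NEXT_U8(pbRm);
    return VINF_SUCCESS;
}
#endif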
1891
1892
1893#ifndef IEM_WITH_SETJMP
1894/**
1895 * Fetches the next signed byte from the opcode stream.
1896 *
1897 * @returns Strict VBox status code.
1898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1899 * @param pi8 Where to return the signed byte.
1900 */
1901DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
1902{
1903 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
1904}
1905#endif /* !IEM_WITH_SETJMP */
1906
1907
1908/**
1909 * Fetches the next signed byte from the opcode stream, returning automatically
1910 * on failure.
1911 *
1912 * @param a_pi8 Where to return the signed byte.
1913 * @remark Implicitly references pVCpu.
1914 */
1915#ifndef IEM_WITH_SETJMP
1916# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1917 do \
1918 { \
1919 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
1920 if (rcStrict2 != VINF_SUCCESS) \
1921 return rcStrict2; \
1922 } while (0)
1923#else /* IEM_WITH_SETJMP */
1924# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1925
1926#endif /* IEM_WITH_SETJMP */
1927
1928#ifndef IEM_WITH_SETJMP
1929
1930/**
1931 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1932 *
1933 * @returns Strict VBox status code.
1934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1935 * @param pu16 Where to return the opcode word.
1936 */
1937DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
1938{
1939 uint8_t u8;
1940 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1941 if (rcStrict == VINF_SUCCESS)
1942 *pu16 = (int8_t)u8;
1943 return rcStrict;
1944}
1945
1946
1947/**
1948 * Fetches the next signed byte from the opcode stream, extending it to
1949 * unsigned 16-bit.
1950 *
1951 * @returns Strict VBox status code.
1952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1953 * @param pu16 Where to return the unsigned word.
1954 */
1955DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
1956{
1957 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1958 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
1959 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
1960
1961 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
1962 pVCpu->iem.s.offOpcode = offOpcode + 1;
1963 return VINF_SUCCESS;
1964}
1965
1966#endif /* !IEM_WITH_SETJMP */
1967
1968/**
1969 * Fetches the next signed byte from the opcode stream, sign-extends it to a
1970 * word, and returns automatically on failure.
1971 *
1972 * @param a_pu16 Where to return the word.
1973 * @remark Implicitly references pVCpu.
1974 */
1975#ifndef IEM_WITH_SETJMP
1976# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1977 do \
1978 { \
1979 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
1980 if (rcStrict2 != VINF_SUCCESS) \
1981 return rcStrict2; \
1982 } while (0)
1983#else
1984# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1985#endif
1986
1987#ifndef IEM_WITH_SETJMP
1988
1989/**
1990 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1991 *
1992 * @returns Strict VBox status code.
1993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1994 * @param pu32 Where to return the opcode dword.
1995 */
1996DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
1997{
1998 uint8_t u8;
1999 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2000 if (rcStrict == VINF_SUCCESS)
2001 *pu32 = (int8_t)u8;
2002 return rcStrict;
2003}
2004
2005
2006/**
2007 * Fetches the next signed byte from the opcode stream, extending it to
2008 * unsigned 32-bit.
2009 *
2010 * @returns Strict VBox status code.
2011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2012 * @param pu32 Where to return the unsigned dword.
2013 */
2014DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2015{
2016 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2017 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2018 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2019
2020 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2021 pVCpu->iem.s.offOpcode = offOpcode + 1;
2022 return VINF_SUCCESS;
2023}
2024
2025#endif /* !IEM_WITH_SETJMP */
2026
2027/**
2028 * Fetches the next signed byte from the opcode stream, sign-extends it to a
2029 * double word, and returns automatically on failure.
2030 *
2031 * @param a_pu32 Where to return the double word.
2032 * @remark Implicitly references pVCpu.
2033 */
2034#ifndef IEM_WITH_SETJMP
2035# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2036 do \
2037 { \
2038 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2039 if (rcStrict2 != VINF_SUCCESS) \
2040 return rcStrict2; \
2041 } while (0)
2042#else
2043# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2044#endif
2045
2046#ifndef IEM_WITH_SETJMP
2047
2048/**
2049 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2050 *
2051 * @returns Strict VBox status code.
2052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2053 * @param pu64 Where to return the opcode qword.
2054 */
2055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2056{
2057 uint8_t u8;
2058 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2059 if (rcStrict == VINF_SUCCESS)
2060 *pu64 = (int8_t)u8;
2061 return rcStrict;
2062}
2063
2064
2065/**
2066 * Fetches the next signed byte from the opcode stream, extending it to
2067 * unsigned 64-bit.
2068 *
2069 * @returns Strict VBox status code.
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param pu64 Where to return the unsigned qword.
2072 */
2073DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2074{
2075 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2076 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2077 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2078
2079 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2080 pVCpu->iem.s.offOpcode = offOpcode + 1;
2081 return VINF_SUCCESS;
2082}
2083
2084#endif /* !IEM_WITH_SETJMP */
2085
2086
2087/**
2088 * Fetches the next signed byte from the opcode stream, sign-extends it to a
2089 * quad word, and returns automatically on failure.
2090 *
2091 * @param a_pu64 Where to return the quad word.
2092 * @remark Implicitly references pVCpu.
2093 */
2094#ifndef IEM_WITH_SETJMP
2095# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2096 do \
2097 { \
2098 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2099 if (rcStrict2 != VINF_SUCCESS) \
2100 return rcStrict2; \
2101 } while (0)
2102#else
2103# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2104#endif
2105
2106
2107#ifndef IEM_WITH_SETJMP
2108
2109/**
2110 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2111 *
2112 * @returns Strict VBox status code.
2113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2114 * @param pu16 Where to return the opcode word.
2115 */
2116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2117{
2118 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2119 if (rcStrict == VINF_SUCCESS)
2120 {
2121 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2122# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2123 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2124# else
2125 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2126# endif
2127 pVCpu->iem.s.offOpcode = offOpcode + 2;
2128 }
2129 else
2130 *pu16 = 0;
2131 return rcStrict;
2132}
2133
2134
2135/**
2136 * Fetches the next opcode word.
2137 *
2138 * @returns Strict VBox status code.
2139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2140 * @param pu16 Where to return the opcode word.
2141 */
2142DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2143{
2144 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2145 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2146 {
2147 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2148# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2149 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2150# else
2151 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2152# endif
2153 return VINF_SUCCESS;
2154 }
2155 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2156}
2157
2158#else /* IEM_WITH_SETJMP */
2159
2160/**
2161 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2162 *
2163 * @returns The opcode word.
2164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2165 */
2166DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2167{
2168# ifdef IEM_WITH_CODE_TLB
2169 uint16_t u16;
2170 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2171 return u16;
2172# else
2173 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2174 if (rcStrict == VINF_SUCCESS)
2175 {
2176 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2177 pVCpu->iem.s.offOpcode += 2;
2178# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2179 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2180# else
2181 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2182# endif
2183 }
2184 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2185# endif
2186}
2187
2188
2189/**
2190 * Fetches the next opcode word, longjmp on error.
2191 *
2192 * @returns The opcode word.
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 */
2195DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2196{
2197# ifdef IEM_WITH_CODE_TLB
2198 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2199 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2200 if (RT_LIKELY( pbBuf != NULL
2201 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2202 {
2203 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2204# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2205 return *(uint16_t const *)&pbBuf[offBuf];
2206# else
2207 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2208# endif
2209 }
2210# else
2211 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2212 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2213 {
2214 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2215# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2216 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2217# else
2218 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2219# endif
2220 }
2221# endif
2222 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2223}
2224
2225#endif /* IEM_WITH_SETJMP */
2226
2227
2228/**
2229 * Fetches the next opcode word, returns automatically on failure.
2230 *
2231 * @param a_pu16 Where to return the opcode word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2244#endif
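
/* Note (illustrative): opcode words are assembled little endian, so for the
   byte stream 0x34 0x12 the RT_MAKE_U16 path yields 0x1234, matching what the
   IEM_USE_UNALIGNED_DATA_ACCESS path reads directly on little-endian hosts. */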
2245
2246#ifndef IEM_WITH_SETJMP
2247
2248/**
2249 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2250 *
2251 * @returns Strict VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2253 * @param pu32 Where to return the opcode double word.
2254 */
2255DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2256{
2257 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2258 if (rcStrict == VINF_SUCCESS)
2259 {
2260 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2261 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2262 pVCpu->iem.s.offOpcode = offOpcode + 2;
2263 }
2264 else
2265 *pu32 = 0;
2266 return rcStrict;
2267}
2268
2269
2270/**
2271 * Fetches the next opcode word, zero extending it to a double word.
2272 *
2273 * @returns Strict VBox status code.
2274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2275 * @param pu32 Where to return the opcode double word.
2276 */
2277DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2278{
2279 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2280 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2281 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2282
2283 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2284 pVCpu->iem.s.offOpcode = offOpcode + 2;
2285 return VINF_SUCCESS;
2286}
2287
2288#endif /* !IEM_WITH_SETJMP */
2289
2290
2291/**
2292 * Fetches the next opcode word and zero extends it to a double word, returns
2293 * automatically on failure.
2294 *
2295 * @param a_pu32 Where to return the opcode double word.
2296 * @remark Implicitly references pVCpu.
2297 */
2298#ifndef IEM_WITH_SETJMP
2299# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2300 do \
2301 { \
2302 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2303 if (rcStrict2 != VINF_SUCCESS) \
2304 return rcStrict2; \
2305 } while (0)
2306#else
2307# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2308#endif
2309
2310#ifndef IEM_WITH_SETJMP
2311
2312/**
2313 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu64 Where to return the opcode quad word.
2318 */
2319DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2320{
2321 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2322 if (rcStrict == VINF_SUCCESS)
2323 {
2324 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2325 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2326 pVCpu->iem.s.offOpcode = offOpcode + 2;
2327 }
2328 else
2329 *pu64 = 0;
2330 return rcStrict;
2331}
2332
2333
2334/**
2335 * Fetches the next opcode word, zero extending it to a quad word.
2336 *
2337 * @returns Strict VBox status code.
2338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2339 * @param pu64 Where to return the opcode quad word.
2340 */
2341DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2342{
2343 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2344 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2345 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2346
2347 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2348 pVCpu->iem.s.offOpcode = offOpcode + 2;
2349 return VINF_SUCCESS;
2350}
2351
2352#endif /* !IEM_WITH_SETJMP */
2353
2354/**
2355 * Fetches the next opcode word and zero extends it to a quad word, returns
2356 * automatically on failure.
2357 *
2358 * @param a_pu64 Where to return the opcode quad word.
2359 * @remark Implicitly references pVCpu.
2360 */
2361#ifndef IEM_WITH_SETJMP
2362# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2363 do \
2364 { \
2365 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2366 if (rcStrict2 != VINF_SUCCESS) \
2367 return rcStrict2; \
2368 } while (0)
2369#else
2370# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2371#endif
2372
2373
2374#ifndef IEM_WITH_SETJMP
2375/**
2376 * Fetches the next signed word from the opcode stream.
2377 *
2378 * @returns Strict VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2380 * @param pi16 Where to return the signed word.
2381 */
2382DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2383{
2384 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2385}
2386#endif /* !IEM_WITH_SETJMP */
2387
2388
2389/**
2390 * Fetches the next signed word from the opcode stream, returning automatically
2391 * on failure.
2392 *
2393 * @param a_pi16 Where to return the signed word.
2394 * @remark Implicitly references pVCpu.
2395 */
2396#ifndef IEM_WITH_SETJMP
2397# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2398 do \
2399 { \
2400 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2401 if (rcStrict2 != VINF_SUCCESS) \
2402 return rcStrict2; \
2403 } while (0)
2404#else
2405# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2406#endif
2407
2408#ifndef IEM_WITH_SETJMP
2409
2410/**
2411 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2412 *
2413 * @returns Strict VBox status code.
2414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2415 * @param pu32 Where to return the opcode dword.
2416 */
2417DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2418{
2419 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2420 if (rcStrict == VINF_SUCCESS)
2421 {
2422 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2423# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2424 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2425# else
2426 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2427 pVCpu->iem.s.abOpcode[offOpcode + 1],
2428 pVCpu->iem.s.abOpcode[offOpcode + 2],
2429 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2430# endif
2431 pVCpu->iem.s.offOpcode = offOpcode + 4;
2432 }
2433 else
2434 *pu32 = 0;
2435 return rcStrict;
2436}
2437
2438
2439/**
2440 * Fetches the next opcode dword.
2441 *
2442 * @returns Strict VBox status code.
2443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2444 * @param pu32 Where to return the opcode double word.
2445 */
2446DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2447{
2448 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2449 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2450 {
2451 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2452# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2453 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2454# else
2455 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2456 pVCpu->iem.s.abOpcode[offOpcode + 1],
2457 pVCpu->iem.s.abOpcode[offOpcode + 2],
2458 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2459# endif
2460 return VINF_SUCCESS;
2461 }
2462 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2463}
2464
2465#else /* IEM_WITH_SETJMP */
2466
2467/**
2468 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2469 *
2470 * @returns The opcode dword.
2471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2472 */
2473DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2474{
2475# ifdef IEM_WITH_CODE_TLB
2476 uint32_t u32;
2477 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2478 return u32;
2479# else
2480 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2481 if (rcStrict == VINF_SUCCESS)
2482 {
2483 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2484 pVCpu->iem.s.offOpcode = offOpcode + 4;
2485# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2486 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2487# else
2488 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2489 pVCpu->iem.s.abOpcode[offOpcode + 1],
2490 pVCpu->iem.s.abOpcode[offOpcode + 2],
2491 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2492# endif
2493 }
2494 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2495# endif
2496}
2497
2498
2499/**
2500 * Fetches the next opcode dword, longjmp on error.
2501 *
2502 * @returns The opcode dword.
2503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2504 */
2505DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2506{
2507# ifdef IEM_WITH_CODE_TLB
2508 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2509 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2510 if (RT_LIKELY( pbBuf != NULL
2511 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2512 {
2513 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 return *(uint32_t const *)&pbBuf[offBuf];
2516# else
2517 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2518 pbBuf[offBuf + 1],
2519 pbBuf[offBuf + 2],
2520 pbBuf[offBuf + 3]);
2521# endif
2522 }
2523# else
2524 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2525 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2526 {
2527 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2528# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2529 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2530# else
2531 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2532 pVCpu->iem.s.abOpcode[offOpcode + 1],
2533 pVCpu->iem.s.abOpcode[offOpcode + 2],
2534 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2535# endif
2536 }
2537# endif
2538 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2539}
2540
2541#endif /* IEM_WITH_SETJMP */
2542
2543
2544/**
2545 * Fetches the next opcode dword, returns automatically on failure.
2546 *
2547 * @param a_pu32 Where to return the opcode dword.
2548 * @remark Implicitly references pVCpu.
2549 */
2550#ifndef IEM_WITH_SETJMP
2551# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2552 do \
2553 { \
2554 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2555 if (rcStrict2 != VINF_SUCCESS) \
2556 return rcStrict2; \
2557 } while (0)
2558#else
2559# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2560#endif
2561
2562#ifndef IEM_WITH_SETJMP
2563
2564/**
2565 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2566 *
2567 * @returns Strict VBox status code.
2568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2569 * @param pu64 Where to return the opcode quad word.
2570 */
2571DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2572{
2573 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2574 if (rcStrict == VINF_SUCCESS)
2575 {
2576 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2577 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2578 pVCpu->iem.s.abOpcode[offOpcode + 1],
2579 pVCpu->iem.s.abOpcode[offOpcode + 2],
2580 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2581 pVCpu->iem.s.offOpcode = offOpcode + 4;
2582 }
2583 else
2584 *pu64 = 0;
2585 return rcStrict;
2586}
2587
2588
2589/**
2590 * Fetches the next opcode dword, zero extending it to a quad word.
2591 *
2592 * @returns Strict VBox status code.
2593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2594 * @param pu64 Where to return the opcode quad word.
2595 */
2596DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2597{
2598 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2599 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2600 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2601
2602 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2603 pVCpu->iem.s.abOpcode[offOpcode + 1],
2604 pVCpu->iem.s.abOpcode[offOpcode + 2],
2605 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2606 pVCpu->iem.s.offOpcode = offOpcode + 4;
2607 return VINF_SUCCESS;
2608}
2609
2610#endif /* !IEM_WITH_SETJMP */
2611
2612
2613/**
2614 * Fetches the next opcode dword and zero extends it to a quad word, returns
2615 * automatically on failure.
2616 *
2617 * @param a_pu64 Where to return the opcode quad word.
2618 * @remark Implicitly references pVCpu.
2619 */
2620#ifndef IEM_WITH_SETJMP
2621# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2622 do \
2623 { \
2624 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2625 if (rcStrict2 != VINF_SUCCESS) \
2626 return rcStrict2; \
2627 } while (0)
2628#else
2629# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2630#endif
2631
2632
2633#ifndef IEM_WITH_SETJMP
2634/**
2635 * Fetches the next signed double word from the opcode stream.
2636 *
2637 * @returns Strict VBox status code.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param pi32 Where to return the signed double word.
2640 */
2641DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2642{
2643 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2644}
2645#endif
2646
2647/**
2648 * Fetches the next signed double word from the opcode stream, returning
2649 * automatically on failure.
2650 *
2651 * @param a_pi32 Where to return the signed double word.
2652 * @remark Implicitly references pVCpu.
2653 */
2654#ifndef IEM_WITH_SETJMP
2655# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2656 do \
2657 { \
2658 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2659 if (rcStrict2 != VINF_SUCCESS) \
2660 return rcStrict2; \
2661 } while (0)
2662#else
2663# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2664#endif
2665
2666#ifndef IEM_WITH_SETJMP
2667
2668/**
2669 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2670 *
2671 * @returns Strict VBox status code.
2672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2673 * @param pu64 Where to return the opcode qword.
2674 */
2675DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2676{
2677 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2678 if (rcStrict == VINF_SUCCESS)
2679 {
2680 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2681 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2682 pVCpu->iem.s.abOpcode[offOpcode + 1],
2683 pVCpu->iem.s.abOpcode[offOpcode + 2],
2684 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2685 pVCpu->iem.s.offOpcode = offOpcode + 4;
2686 }
2687 else
2688 *pu64 = 0;
2689 return rcStrict;
2690}
2691
2692
2693/**
2694 * Fetches the next opcode dword, sign extending it into a quad word.
2695 *
2696 * @returns Strict VBox status code.
2697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2698 * @param pu64 Where to return the opcode quad word.
2699 */
2700DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2701{
2702 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2703 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2704 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2705
2706 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2707 pVCpu->iem.s.abOpcode[offOpcode + 1],
2708 pVCpu->iem.s.abOpcode[offOpcode + 2],
2709 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2710 *pu64 = i32;
2711 pVCpu->iem.s.offOpcode = offOpcode + 4;
2712 return VINF_SUCCESS;
2713}
2714
2715#endif /* !IEM_WITH_SETJMP */
2716
2717
2718/**
2719 * Fetches the next opcode double word and sign extends it to a quad word,
2720 * returns automatically on failure.
2721 *
2722 * @param a_pu64 Where to return the opcode quad word.
2723 * @remark Implicitly references pVCpu.
2724 */
2725#ifndef IEM_WITH_SETJMP
2726# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2727 do \
2728 { \
2729 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2730 if (rcStrict2 != VINF_SUCCESS) \
2731 return rcStrict2; \
2732 } while (0)
2733#else
2734# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2735#endif
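
/* Note (illustrative): the (int32_t) cast does the sign extension, so an
   encoded displacement of 0xfffffff0 ends up as 0xfffffffffffffff0 (-16) in
   the quad word, which is what a sign-extended disp32 in 64-bit addressing
   requires. */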
2736
2737#ifndef IEM_WITH_SETJMP
2738
2739/**
2740 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2741 *
2742 * @returns Strict VBox status code.
2743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2744 * @param pu64 Where to return the opcode qword.
2745 */
2746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2747{
2748 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2749 if (rcStrict == VINF_SUCCESS)
2750 {
2751 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2752# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2753 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2754# else
2755 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2756 pVCpu->iem.s.abOpcode[offOpcode + 1],
2757 pVCpu->iem.s.abOpcode[offOpcode + 2],
2758 pVCpu->iem.s.abOpcode[offOpcode + 3],
2759 pVCpu->iem.s.abOpcode[offOpcode + 4],
2760 pVCpu->iem.s.abOpcode[offOpcode + 5],
2761 pVCpu->iem.s.abOpcode[offOpcode + 6],
2762 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2763# endif
2764 pVCpu->iem.s.offOpcode = offOpcode + 8;
2765 }
2766 else
2767 *pu64 = 0;
2768 return rcStrict;
2769}
2770
2771
2772/**
2773 * Fetches the next opcode qword.
2774 *
2775 * @returns Strict VBox status code.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 * @param pu64 Where to return the opcode qword.
2778 */
2779DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2780{
2781 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2782 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2783 {
2784# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2785 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2786# else
2787 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2788 pVCpu->iem.s.abOpcode[offOpcode + 1],
2789 pVCpu->iem.s.abOpcode[offOpcode + 2],
2790 pVCpu->iem.s.abOpcode[offOpcode + 3],
2791 pVCpu->iem.s.abOpcode[offOpcode + 4],
2792 pVCpu->iem.s.abOpcode[offOpcode + 5],
2793 pVCpu->iem.s.abOpcode[offOpcode + 6],
2794 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2795# endif
2796 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2797 return VINF_SUCCESS;
2798 }
2799 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2800}
2801
2802#else /* IEM_WITH_SETJMP */
2803
2804/**
2805 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2806 *
2807 * @returns The opcode qword.
2808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2809 */
2810DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2811{
2812# ifdef IEM_WITH_CODE_TLB
2813 uint64_t u64;
2814 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2815 return u64;
2816# else
2817 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2818 if (rcStrict == VINF_SUCCESS)
2819 {
2820 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2821 pVCpu->iem.s.offOpcode = offOpcode + 8;
2822# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2823 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2824# else
2825 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2826 pVCpu->iem.s.abOpcode[offOpcode + 1],
2827 pVCpu->iem.s.abOpcode[offOpcode + 2],
2828 pVCpu->iem.s.abOpcode[offOpcode + 3],
2829 pVCpu->iem.s.abOpcode[offOpcode + 4],
2830 pVCpu->iem.s.abOpcode[offOpcode + 5],
2831 pVCpu->iem.s.abOpcode[offOpcode + 6],
2832 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2833# endif
2834 }
2835 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2836# endif
2837}
2838
2839
2840/**
2841 * Fetches the next opcode qword, longjmp on error.
2842 *
2843 * @returns The opcode qword.
2844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2845 */
2846DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2847{
2848# ifdef IEM_WITH_CODE_TLB
2849 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2850 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2851 if (RT_LIKELY( pbBuf != NULL
2852 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2853 {
2854 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2855# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2856 return *(uint64_t const *)&pbBuf[offBuf];
2857# else
2858 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2859 pbBuf[offBuf + 1],
2860 pbBuf[offBuf + 2],
2861 pbBuf[offBuf + 3],
2862 pbBuf[offBuf + 4],
2863 pbBuf[offBuf + 5],
2864 pbBuf[offBuf + 6],
2865 pbBuf[offBuf + 7]);
2866# endif
2867 }
2868# else
2869 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2870 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2871 {
2872 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2873# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2874 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2875# else
2876 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2877 pVCpu->iem.s.abOpcode[offOpcode + 1],
2878 pVCpu->iem.s.abOpcode[offOpcode + 2],
2879 pVCpu->iem.s.abOpcode[offOpcode + 3],
2880 pVCpu->iem.s.abOpcode[offOpcode + 4],
2881 pVCpu->iem.s.abOpcode[offOpcode + 5],
2882 pVCpu->iem.s.abOpcode[offOpcode + 6],
2883 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2884# endif
2885 }
2886# endif
2887 return iemOpcodeGetNextU64SlowJmp(pVCpu);
2888}
2889
2890#endif /* IEM_WITH_SETJMP */
2891
2892/**
2893 * Fetches the next opcode quad word, returns automatically on failure.
2894 *
2895 * @param a_pu64 Where to return the opcode quad word.
2896 * @remark Implicitly references pVCpu.
2897 */
2898#ifndef IEM_WITH_SETJMP
2899# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
2900 do \
2901 { \
2902 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
2903 if (rcStrict2 != VINF_SUCCESS) \
2904 return rcStrict2; \
2905 } while (0)
2906#else
2907# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
2908#endif
2909
2910
2911/** @name Misc Worker Functions.
2912 * @{
2913 */
2914
2915
2916/**
2917 * Validates a new SS segment.
2918 *
2919 * @returns VBox strict status code.
2920 * @param pVCpu The cross context virtual CPU structure of the
2921 * calling thread.
2922 * @param pCtx The CPU context.
2923 * @param NewSS The new SS selector.
2924 * @param uCpl The CPL to load the stack for.
2925 * @param pDesc Where to return the descriptor.
2926 */
2927IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2928{
2929 NOREF(pCtx);
2930
2931 /* Null selectors are not allowed (we're not called for dispatching
2932 interrupts with SS=0 in long mode). */
2933 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2934 {
2935 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2936 return iemRaiseTaskSwitchFault0(pVCpu);
2937 }
2938
2939 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2940 if ((NewSS & X86_SEL_RPL) != uCpl)
2941 {
2942 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2943 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2944 }
2945
2946 /*
2947 * Read the descriptor.
2948 */
2949 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2950 if (rcStrict != VINF_SUCCESS)
2951 return rcStrict;
2952
2953 /*
2954 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2955 */
2956 if (!pDesc->Legacy.Gen.u1DescType)
2957 {
2958 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2959 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2960 }
2961
2962 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2963 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2964 {
2965 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2966 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2967 }
2968 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2969 {
2970 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2971 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2972 }
2973
2974 /* Is it there? */
2975 /** @todo testcase: Is this checked before the canonical / limit check below? */
2976 if (!pDesc->Legacy.Gen.u1Present)
2977 {
2978 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2979 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2980 }
2981
2982 return VINF_SUCCESS;
2983}
2984
2985
2986/**
2987 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2988 * not.
2989 *
2990 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2991 * @param a_pCtx The CPU context.
2992 */
2993#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2994# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2995 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
2996 ? (a_pCtx)->eflags.u \
2997 : CPUMRawGetEFlags(a_pVCpu) )
2998#else
2999# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3000 ( (a_pCtx)->eflags.u )
3001#endif
3002
3003/**
3004 * Updates the EFLAGS in the correct manner wrt. PATM.
3005 *
3006 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param a_pCtx The CPU context.
3008 * @param a_fEfl The new EFLAGS.
3009 */
3010#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3011# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3012 do { \
3013 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3014 (a_pCtx)->eflags.u = (a_fEfl); \
3015 else \
3016 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3017 } while (0)
3018#else
3019# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3020 do { \
3021 (a_pCtx)->eflags.u = (a_fEfl); \
3022 } while (0)
3023#endif
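
#if 0
/* Illustrative sketch only, never built: typical use of the EFLAGS accessors
   above when an instruction implementation needs to update a status flag.
   The helper name is hypothetical and not part of IEM. */
IEM_STATIC void iemExampleSketch_SetCarry(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
    fEfl |= X86_EFL_CF;
    IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
}
#endif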
3024
3025
3026/** @} */
3027
3028/** @name Raising Exceptions.
3029 *
3030 * @{
3031 */
3032
3033/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3034 * @{ */
3035/** CPU exception. */
3036#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3037/** External interrupt (from PIC, APIC, whatever). */
3038#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3039/** Software interrupt (int or into, not bound).
3040 * Returns to the following instruction */
3041#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3042/** Takes an error code. */
3043#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3044/** Takes a CR2. */
3045#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3046/** Generated by the breakpoint instruction. */
3047#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3048/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3049#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3050/** @} */
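
/* Note (illustrative): a software INT nn is raised with IEM_XCPT_FLAGS_T_SOFT_INT,
   a #GP typically with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, and a
   #PF additionally passes IEM_XCPT_FLAGS_CR2 so that the uCr2 argument is
   consumed. */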
3051
3052
3053/**
3054 * Loads the specified stack far pointer from the TSS.
3055 *
3056 * @returns VBox strict status code.
3057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3058 * @param pCtx The CPU context.
3059 * @param uCpl The CPL to load the stack for.
3060 * @param pSelSS Where to return the new stack segment.
3061 * @param puEsp Where to return the new stack pointer.
3062 */
3063IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3064 PRTSEL pSelSS, uint32_t *puEsp)
3065{
3066 VBOXSTRICTRC rcStrict;
3067 Assert(uCpl < 4);
3068
3069 switch (pCtx->tr.Attr.n.u4Type)
3070 {
3071 /*
3072 * 16-bit TSS (X86TSS16).
3073 */
3074 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3075 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3076 {
3077 uint32_t off = uCpl * 4 + 2;
3078 if (off + 4 <= pCtx->tr.u32Limit)
3079 {
3080 /** @todo check actual access pattern here. */
3081 uint32_t u32Tmp = 0; /* gcc maybe... */
3082 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3083 if (rcStrict == VINF_SUCCESS)
3084 {
3085 *puEsp = RT_LOWORD(u32Tmp);
3086 *pSelSS = RT_HIWORD(u32Tmp);
3087 return VINF_SUCCESS;
3088 }
3089 }
3090 else
3091 {
3092 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3093 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3094 }
3095 break;
3096 }
3097
3098 /*
3099 * 32-bit TSS (X86TSS32).
3100 */
3101 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3102 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3103 {
3104 uint32_t off = uCpl * 8 + 4;
3105 if (off + 7 <= pCtx->tr.u32Limit)
3106 {
3107/** @todo check actual access pattern here. */
3108 uint64_t u64Tmp;
3109 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3110 if (rcStrict == VINF_SUCCESS)
3111 {
3112 *puEsp = u64Tmp & UINT32_MAX;
3113 *pSelSS = (RTSEL)(u64Tmp >> 32);
3114 return VINF_SUCCESS;
3115 }
3116 }
3117 else
3118 {
3119 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3120 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3121 }
3122 break;
3123 }
3124
3125 default:
3126 AssertFailed();
3127 rcStrict = VERR_IEM_IPE_4;
3128 break;
3129 }
3130
3131 *puEsp = 0; /* make gcc happy */
3132 *pSelSS = 0; /* make gcc happy */
3133 return rcStrict;
3134}
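
/* Note (illustrative): in a 16-bit TSS the ring-N SS:SP pairs are 4 bytes each
   and start at offset 2 (hence off = uCpl * 4 + 2), while a 32-bit TSS keeps
   8-byte ESP/SS pairs starting at offset 4 (off = uCpl * 8 + 4); e.g. uCpl=1
   reads offset 6 from a TSS16 and offset 12 from a TSS32. */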
3135
3136
3137/**
3138 * Loads the specified stack pointer from the 64-bit TSS.
3139 *
3140 * @returns VBox strict status code.
3141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3142 * @param pCtx The CPU context.
3143 * @param uCpl The CPL to load the stack for.
3144 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3145 * @param puRsp Where to return the new stack pointer.
3146 */
3147IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3148{
3149 Assert(uCpl < 4);
3150 Assert(uIst < 8);
3151 *puRsp = 0; /* make gcc happy */
3152
3153 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3154
3155 uint32_t off;
3156 if (uIst)
3157 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3158 else
3159 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3160 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3161 {
3162 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3163 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3164 }
3165
3166 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3167}
3168
3169
3170/**
3171 * Adjust the CPU state according to the exception being raised.
3172 *
3173 * @param pCtx The CPU context.
3174 * @param u8Vector The exception that has been raised.
3175 */
3176DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3177{
3178 switch (u8Vector)
3179 {
3180 case X86_XCPT_DB:
3181 pCtx->dr[7] &= ~X86_DR7_GD;
3182 break;
3183 /** @todo Read the AMD and Intel exception reference... */
3184 }
3185}
3186
3187
3188/**
3189 * Implements exceptions and interrupts for real mode.
3190 *
3191 * @returns VBox strict status code.
3192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3193 * @param pCtx The CPU context.
3194 * @param cbInstr The number of bytes to offset rIP by in the return
3195 * address.
3196 * @param u8Vector The interrupt / exception vector number.
3197 * @param fFlags The flags.
3198 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3199 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3200 */
3201IEM_STATIC VBOXSTRICTRC
3202iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3203 PCPUMCTX pCtx,
3204 uint8_t cbInstr,
3205 uint8_t u8Vector,
3206 uint32_t fFlags,
3207 uint16_t uErr,
3208 uint64_t uCr2)
3209{
3210 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3211 NOREF(uErr); NOREF(uCr2);
3212
3213 /*
3214 * Read the IDT entry.
3215 */
3216 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3217 {
3218 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3219 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3220 }
3221 RTFAR16 Idte;
3222 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3223 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3224 return rcStrict;
3225
3226 /*
3227 * Push the stack frame.
3228 */
3229 uint16_t *pu16Frame;
3230 uint64_t uNewRsp;
3231 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3232 if (rcStrict != VINF_SUCCESS)
3233 return rcStrict;
3234
3235 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3236#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3237 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3238 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3239 fEfl |= UINT16_C(0xf000);
3240#endif
3241 pu16Frame[2] = (uint16_t)fEfl;
3242 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3243 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3244 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3245 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3246 return rcStrict;
3247
3248 /*
3249 * Load the vector address into cs:ip and make exception specific state
3250 * adjustments.
3251 */
3252 pCtx->cs.Sel = Idte.sel;
3253 pCtx->cs.ValidSel = Idte.sel;
3254 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3255 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3256 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3257 pCtx->rip = Idte.off;
3258 fEfl &= ~X86_EFL_IF;
3259 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3260
3261 /** @todo do we actually do this in real mode? */
3262 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3263 iemRaiseXcptAdjustState(pCtx, u8Vector);
3264
3265 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3266}
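
/*
 * Editor's note: a standalone sketch (not IEM code) of real-mode interrupt
 * delivery as performed above.  The IVT entry for vector N is the 4-byte far
 * pointer at IDTR.base + N * 4 (16-bit offset, then 16-bit segment), and the
 * CPU pushes FLAGS, CS and IP (6 bytes) before loading CS:IP from that entry.
 * All names here are invented for illustration.
 */
#include <stdint.h>

/* Linear address of the real-mode IVT entry for a given vector. */
uint32_t exampleRealModeIdteAddr(uint32_t uIdtBase, uint8_t u8Vector)
{
    return uIdtBase + (uint32_t)u8Vector * 4;
}

/* The 3-word exception frame as it ends up in memory (lowest address first). */
void exampleRealModeFrame(uint16_t au16Frame[3], uint16_t uIp, uint16_t uCs, uint16_t uFlags)
{
    au16Frame[0] = uIp;     /* return IP    (pushed last)  */
    au16Frame[1] = uCs;     /* return CS                   */
    au16Frame[2] = uFlags;  /* FLAGS image  (pushed first) */
}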
3267
3268
3269/**
3270 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3273 * @param pSReg Pointer to the segment register.
3274 */
3275IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3276{
3277 pSReg->Sel = 0;
3278 pSReg->ValidSel = 0;
3279 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3280 {
3281 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3282 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3283 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3284 }
3285 else
3286 {
3287 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3288 /** @todo check this on AMD-V */
3289 pSReg->u64Base = 0;
3290 pSReg->u32Limit = 0;
3291 }
3292}
3293
3294
3295/**
3296 * Loads a segment selector during a task switch in V8086 mode.
3297 *
3298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3299 * @param pSReg Pointer to the segment register.
3300 * @param uSel The selector value to load.
3301 */
3302IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3303{
3304 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3305 pSReg->Sel = uSel;
3306 pSReg->ValidSel = uSel;
3307 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3308 pSReg->u64Base = uSel << 4;
3309 pSReg->u32Limit = 0xffff;
3310 pSReg->Attr.u = 0xf3;
3311}
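
/*
 * Editor's note: a one-liner standalone sketch (not IEM code) of the hidden
 * state derived above for virtual-8086 mode: the base is simply the selector
 * shifted left by four, the limit is 64K-1 and the attributes (0xf3) describe
 * a present, DPL-3, accessed read/write data segment.
 */
#include <stdint.h>

uint64_t exampleV86SegBase(uint16_t uSel)
{
    return (uint64_t)uSel << 4;   /* e.g. selector 0xb800 -> base 0xb8000 */
}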
3312
3313
3314/**
3315 * Loads a NULL data selector into a selector register, both the hidden and
3316 * visible parts, in protected mode.
3317 *
3318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3319 * @param pSReg Pointer to the segment register.
3320 * @param uRpl The RPL.
3321 */
3322IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3323{
3324 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3325 * data selector in protected mode. */
3326 pSReg->Sel = uRpl;
3327 pSReg->ValidSel = uRpl;
3328 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3329 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3330 {
3331 /* VT-x (Intel 3960x) observed doing something like this. */
3332 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3333 pSReg->u32Limit = UINT32_MAX;
3334 pSReg->u64Base = 0;
3335 }
3336 else
3337 {
3338 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3339 pSReg->u32Limit = 0;
3340 pSReg->u64Base = 0;
3341 }
3342}
3343
3344
3345/**
3346 * Loads a segment selector during a task switch in protected mode.
3347 *
3348 * In this task switch scenario, we would throw \#TS exceptions rather than
3349 * \#GPs.
3350 *
3351 * @returns VBox strict status code.
3352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3353 * @param pSReg Pointer to the segment register.
3354 * @param uSel The new selector value.
3355 *
3356 * @remarks This does _not_ handle CS or SS.
3357 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3358 */
3359IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3360{
3361 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3362
3363 /* Null data selector. */
3364 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3365 {
3366 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3367 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3368 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3369 return VINF_SUCCESS;
3370 }
3371
3372 /* Fetch the descriptor. */
3373 IEMSELDESC Desc;
3374 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3375 if (rcStrict != VINF_SUCCESS)
3376 {
3377 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3378 VBOXSTRICTRC_VAL(rcStrict)));
3379 return rcStrict;
3380 }
3381
3382 /* Must be a data segment or readable code segment. */
3383 if ( !Desc.Legacy.Gen.u1DescType
3384 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3385 {
3386 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3387 Desc.Legacy.Gen.u4Type));
3388 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3389 }
3390
3391 /* Check privileges for data segments and non-conforming code segments. */
3392 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3393 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3394 {
3395 /* The RPL and the new CPL must be less than or equal to the DPL. */
3396 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3397 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3398 {
3399 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3400 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3401 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3402 }
3403 }
3404
3405 /* Is it there? */
3406 if (!Desc.Legacy.Gen.u1Present)
3407 {
3408 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3409 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3410 }
3411
3412 /* The base and limit. */
3413 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3414 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3415
3416 /*
3417 * Ok, everything checked out fine. Now set the accessed bit before
3418 * committing the result into the registers.
3419 */
3420 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3421 {
3422 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3423 if (rcStrict != VINF_SUCCESS)
3424 return rcStrict;
3425 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3426 }
3427
3428 /* Commit */
3429 pSReg->Sel = uSel;
3430 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3431 pSReg->u32Limit = cbLimit;
3432 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3433 pSReg->ValidSel = uSel;
3434 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3435 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3436 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3437
3438 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3439 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3440 return VINF_SUCCESS;
3441}
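
/*
 * Editor's note: a standalone sketch (not IEM code) of what the
 * X86DESC_BASE / X86DESC_LIMIT_G macros used above work out to for a legacy
 * descriptor: the 32-bit base is scattered over three fields, and the 20-bit
 * limit is scaled by 4K when the granularity bit is set.  The field names in
 * this example structure are invented.
 */
#include <stdint.h>

typedef struct EXAMPLEDESC
{
    uint32_t uLimitLow     : 16;  /* limit 15:0  */
    uint32_t uBaseLow      : 16;  /* base  15:0  */
    uint32_t uBaseMid      : 8;   /* base  23:16 */
    uint32_t u4Type        : 4;
    uint32_t u1DescType    : 1;   /* S */
    uint32_t u2Dpl         : 2;
    uint32_t u1Present     : 1;
    uint32_t uLimitHigh    : 4;   /* limit 19:16 */
    uint32_t u1Available   : 1;
    uint32_t u1Long        : 1;
    uint32_t u1DefBig      : 1;
    uint32_t u1Granularity : 1;
    uint32_t uBaseHigh     : 8;   /* base  31:24 */
} EXAMPLEDESC;

uint32_t exampleDescBase(const EXAMPLEDESC *pDesc)
{
    return pDesc->uBaseLow | (pDesc->uBaseMid << 16) | (pDesc->uBaseHigh << 24);
}

uint32_t exampleDescLimit(const EXAMPLEDESC *pDesc)
{
    uint32_t uLimit = pDesc->uLimitLow | (pDesc->uLimitHigh << 16);
    return pDesc->u1Granularity ? (uLimit << 12) | UINT32_C(0xfff) : uLimit;
}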
3442
3443
3444/**
3445 * Performs a task switch.
3446 *
3447 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3448 * caller is responsible for performing the necessary checks (like DPL, TSS
3449 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3450 * reference for JMP, CALL, IRET.
3451 *
3452 * If the task switch is due to a software interrupt or hardware exception,
3453 * the caller is responsible for validating the TSS selector and descriptor. See
3454 * Intel Instruction reference for INT n.
3455 *
3456 * @returns VBox strict status code.
3457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param pCtx The CPU context.
3459 * @param enmTaskSwitch What caused this task switch.
3460 * @param uNextEip The EIP effective after the task switch.
3461 * @param fFlags The flags.
3462 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3463 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3464 * @param SelTSS The TSS selector of the new task.
3465 * @param pNewDescTSS Pointer to the new TSS descriptor.
3466 */
3467IEM_STATIC VBOXSTRICTRC
3468iemTaskSwitch(PVMCPU pVCpu,
3469 PCPUMCTX pCtx,
3470 IEMTASKSWITCH enmTaskSwitch,
3471 uint32_t uNextEip,
3472 uint32_t fFlags,
3473 uint16_t uErr,
3474 uint64_t uCr2,
3475 RTSEL SelTSS,
3476 PIEMSELDESC pNewDescTSS)
3477{
3478 Assert(!IEM_IS_REAL_MODE(pVCpu));
3479 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3480
3481 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3482 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3483 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3484 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3485 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3486
3487 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3488 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3489
3490 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3491 fIsNewTSS386, pCtx->eip, uNextEip));
3492
3493 /* Update CR2 in case it's a page-fault. */
3494 /** @todo This should probably be done much earlier in IEM/PGM. See
3495 * @bugref{5653#c49}. */
3496 if (fFlags & IEM_XCPT_FLAGS_CR2)
3497 pCtx->cr2 = uCr2;
3498
3499 /*
3500 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3501 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3502 */
3503 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3504 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3505 if (uNewTSSLimit < uNewTSSLimitMin)
3506 {
3507 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3508 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3509 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3510 }
3511
3512 /*
3513 * Check the current TSS limit. The last write to the current TSS during the
3514 * task switch is 2 bytes at offset 0x5C (32-bit) or 1 byte at offset 0x28 (16-bit).
3515 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3516 *
3517 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3518 * end up with smaller than "legal" TSS limits.
3519 */
3520 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3521 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3522 if (uCurTSSLimit < uCurTSSLimitMin)
3523 {
3524 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3525 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3526 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3527 }
3528
3529 /*
3530 * Verify that the new TSS can be accessed and map it. Map only the required contents
3531 * and not the entire TSS.
3532 */
3533 void *pvNewTSS;
3534 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3535 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3536 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3537 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3538 * not perform correct translation if this happens. See Intel spec. 7.2.1
3539 * "Task-State Segment" */
3540 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3541 if (rcStrict != VINF_SUCCESS)
3542 {
3543 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3544 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3545 return rcStrict;
3546 }
3547
3548 /*
3549 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3550 */
3551 uint32_t u32EFlags = pCtx->eflags.u32;
3552 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3553 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3554 {
3555 PX86DESC pDescCurTSS;
3556 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3557 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3558 if (rcStrict != VINF_SUCCESS)
3559 {
3560 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3561 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3562 return rcStrict;
3563 }
3564
3565 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3566 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3567 if (rcStrict != VINF_SUCCESS)
3568 {
3569 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3570 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3571 return rcStrict;
3572 }
3573
3574 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3575 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3576 {
3577 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3578 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3579 u32EFlags &= ~X86_EFL_NT;
3580 }
3581 }
3582
3583 /*
3584 * Save the CPU state into the current TSS.
3585 */
3586 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3587 if (GCPtrNewTSS == GCPtrCurTSS)
3588 {
3589 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3590 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3591 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3592 }
3593 if (fIsNewTSS386)
3594 {
3595 /*
3596 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3597 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3598 */
3599 void *pvCurTSS32;
3600 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3601 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3602 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3603 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3604 if (rcStrict != VINF_SUCCESS)
3605 {
3606 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3607 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3608 return rcStrict;
3609 }
3610
3611 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3612 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3613 pCurTSS32->eip = uNextEip;
3614 pCurTSS32->eflags = u32EFlags;
3615 pCurTSS32->eax = pCtx->eax;
3616 pCurTSS32->ecx = pCtx->ecx;
3617 pCurTSS32->edx = pCtx->edx;
3618 pCurTSS32->ebx = pCtx->ebx;
3619 pCurTSS32->esp = pCtx->esp;
3620 pCurTSS32->ebp = pCtx->ebp;
3621 pCurTSS32->esi = pCtx->esi;
3622 pCurTSS32->edi = pCtx->edi;
3623 pCurTSS32->es = pCtx->es.Sel;
3624 pCurTSS32->cs = pCtx->cs.Sel;
3625 pCurTSS32->ss = pCtx->ss.Sel;
3626 pCurTSS32->ds = pCtx->ds.Sel;
3627 pCurTSS32->fs = pCtx->fs.Sel;
3628 pCurTSS32->gs = pCtx->gs.Sel;
3629
3630 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3631 if (rcStrict != VINF_SUCCESS)
3632 {
3633 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3634 VBOXSTRICTRC_VAL(rcStrict)));
3635 return rcStrict;
3636 }
3637 }
3638 else
3639 {
3640 /*
3641 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3642 */
3643 void *pvCurTSS16;
3644 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3645 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3646 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3647 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3648 if (rcStrict != VINF_SUCCESS)
3649 {
3650 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3651 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3652 return rcStrict;
3653 }
3654
3655 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3656 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3657 pCurTSS16->ip = uNextEip;
3658 pCurTSS16->flags = u32EFlags;
3659 pCurTSS16->ax = pCtx->ax;
3660 pCurTSS16->cx = pCtx->cx;
3661 pCurTSS16->dx = pCtx->dx;
3662 pCurTSS16->bx = pCtx->bx;
3663 pCurTSS16->sp = pCtx->sp;
3664 pCurTSS16->bp = pCtx->bp;
3665 pCurTSS16->si = pCtx->si;
3666 pCurTSS16->di = pCtx->di;
3667 pCurTSS16->es = pCtx->es.Sel;
3668 pCurTSS16->cs = pCtx->cs.Sel;
3669 pCurTSS16->ss = pCtx->ss.Sel;
3670 pCurTSS16->ds = pCtx->ds.Sel;
3671
3672 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3673 if (rcStrict != VINF_SUCCESS)
3674 {
3675 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3676 VBOXSTRICTRC_VAL(rcStrict)));
3677 return rcStrict;
3678 }
3679 }
3680
3681 /*
3682 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3683 */
3684 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3685 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3686 {
3687 /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
3688 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3689 pNewTSS->selPrev = pCtx->tr.Sel;
3690 }
3691
3692 /*
3693 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
3694 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3695 */
3696 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3697 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3698 bool fNewDebugTrap;
3699 if (fIsNewTSS386)
3700 {
3701 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3702 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3703 uNewEip = pNewTSS32->eip;
3704 uNewEflags = pNewTSS32->eflags;
3705 uNewEax = pNewTSS32->eax;
3706 uNewEcx = pNewTSS32->ecx;
3707 uNewEdx = pNewTSS32->edx;
3708 uNewEbx = pNewTSS32->ebx;
3709 uNewEsp = pNewTSS32->esp;
3710 uNewEbp = pNewTSS32->ebp;
3711 uNewEsi = pNewTSS32->esi;
3712 uNewEdi = pNewTSS32->edi;
3713 uNewES = pNewTSS32->es;
3714 uNewCS = pNewTSS32->cs;
3715 uNewSS = pNewTSS32->ss;
3716 uNewDS = pNewTSS32->ds;
3717 uNewFS = pNewTSS32->fs;
3718 uNewGS = pNewTSS32->gs;
3719 uNewLdt = pNewTSS32->selLdt;
3720 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3721 }
3722 else
3723 {
3724 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3725 uNewCr3 = 0;
3726 uNewEip = pNewTSS16->ip;
3727 uNewEflags = pNewTSS16->flags;
3728 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3729 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3730 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3731 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3732 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3733 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3734 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3735 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3736 uNewES = pNewTSS16->es;
3737 uNewCS = pNewTSS16->cs;
3738 uNewSS = pNewTSS16->ss;
3739 uNewDS = pNewTSS16->ds;
3740 uNewFS = 0;
3741 uNewGS = 0;
3742 uNewLdt = pNewTSS16->selLdt;
3743 fNewDebugTrap = false;
3744 }
3745
3746 if (GCPtrNewTSS == GCPtrCurTSS)
3747 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3748 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3749
3750 /*
3751 * We're done accessing the new TSS.
3752 */
3753 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3754 if (rcStrict != VINF_SUCCESS)
3755 {
3756 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3757 return rcStrict;
3758 }
3759
3760 /*
3761 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3762 */
3763 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3764 {
3765 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3766 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3767 if (rcStrict != VINF_SUCCESS)
3768 {
3769 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3770 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3771 return rcStrict;
3772 }
3773
3774 /* Check that the descriptor indicates the new TSS is available (not busy). */
3775 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3776 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3777 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3778
3779 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3780 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3781 if (rcStrict != VINF_SUCCESS)
3782 {
3783 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3784 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3785 return rcStrict;
3786 }
3787 }
3788
3789 /*
3790 * From this point on, we're technically in the new task. Exceptions raised from here on are
3791 * deferred until the task switch completes and are delivered before the first instruction of the new task executes.
3792 */
3793 pCtx->tr.Sel = SelTSS;
3794 pCtx->tr.ValidSel = SelTSS;
3795 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3796 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3797 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3798 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3799 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3800
3801 /* Set the busy bit in TR. */
3802 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3803 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3804 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3805 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3806 {
3807 uNewEflags |= X86_EFL_NT;
3808 }
3809
3810 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3811 pCtx->cr0 |= X86_CR0_TS;
3812 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3813
3814 pCtx->eip = uNewEip;
3815 pCtx->eax = uNewEax;
3816 pCtx->ecx = uNewEcx;
3817 pCtx->edx = uNewEdx;
3818 pCtx->ebx = uNewEbx;
3819 pCtx->esp = uNewEsp;
3820 pCtx->ebp = uNewEbp;
3821 pCtx->esi = uNewEsi;
3822 pCtx->edi = uNewEdi;
3823
3824 uNewEflags &= X86_EFL_LIVE_MASK;
3825 uNewEflags |= X86_EFL_RA1_MASK;
3826 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3827
3828 /*
3829 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3830 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3831 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3832 */
3833 pCtx->es.Sel = uNewES;
3834 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3835
3836 pCtx->cs.Sel = uNewCS;
3837 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3838
3839 pCtx->ss.Sel = uNewSS;
3840 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3841
3842 pCtx->ds.Sel = uNewDS;
3843 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3844
3845 pCtx->fs.Sel = uNewFS;
3846 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3847
3848 pCtx->gs.Sel = uNewGS;
3849 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3850 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3851
3852 pCtx->ldtr.Sel = uNewLdt;
3853 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3854 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3855 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3856
3857 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3858 {
3859 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3860 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3861 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3862 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3863 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3864 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3865 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3866 }
3867
3868 /*
3869 * Switch CR3 for the new task.
3870 */
3871 if ( fIsNewTSS386
3872 && (pCtx->cr0 & X86_CR0_PG))
3873 {
3874 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3875 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3876 {
3877 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3878 AssertRCSuccessReturn(rc, rc);
3879 }
3880 else
3881 pCtx->cr3 = uNewCr3;
3882
3883 /* Inform PGM. */
3884 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3885 {
3886 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3887 AssertRCReturn(rc, rc);
3888 /* ignore informational status codes */
3889 }
3890 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3891 }
3892
3893 /*
3894 * Switch LDTR for the new task.
3895 */
3896 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3897 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
3898 else
3899 {
3900 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3901
3902 IEMSELDESC DescNewLdt;
3903 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3904 if (rcStrict != VINF_SUCCESS)
3905 {
3906 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3907 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3908 return rcStrict;
3909 }
3910 if ( !DescNewLdt.Legacy.Gen.u1Present
3911 || DescNewLdt.Legacy.Gen.u1DescType
3912 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3913 {
3914 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3915 uNewLdt, DescNewLdt.Legacy.u));
3916 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3917 }
3918
3919 pCtx->ldtr.ValidSel = uNewLdt;
3920 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3921 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3922 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3923 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3924 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3925 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3926 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
3927 }
3928
3929 IEMSELDESC DescSS;
3930 if (IEM_IS_V86_MODE(pVCpu))
3931 {
3932 pVCpu->iem.s.uCpl = 3;
3933 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES);
3934 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS);
3935 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS);
3936 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS);
3937 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS);
3938 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS);
3939 }
3940 else
3941 {
3942 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3943
3944 /*
3945 * Load the stack segment for the new task.
3946 */
3947 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3948 {
3949 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3950 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3951 }
3952
3953 /* Fetch the descriptor. */
3954 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3955 if (rcStrict != VINF_SUCCESS)
3956 {
3957 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3958 VBOXSTRICTRC_VAL(rcStrict)));
3959 return rcStrict;
3960 }
3961
3962 /* SS must be a data segment and writable. */
3963 if ( !DescSS.Legacy.Gen.u1DescType
3964 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3965 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3966 {
3967 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3968 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3969 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3970 }
3971
3972 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3973 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3974 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3975 {
3976 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3977 uNewCpl));
3978 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3979 }
3980
3981 /* Is it there? */
3982 if (!DescSS.Legacy.Gen.u1Present)
3983 {
3984 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3985 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3986 }
3987
3988 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3989 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3990
3991 /* Set the accessed bit before committing the result into SS. */
3992 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3993 {
3994 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3995 if (rcStrict != VINF_SUCCESS)
3996 return rcStrict;
3997 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3998 }
3999
4000 /* Commit SS. */
4001 pCtx->ss.Sel = uNewSS;
4002 pCtx->ss.ValidSel = uNewSS;
4003 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4004 pCtx->ss.u32Limit = cbLimit;
4005 pCtx->ss.u64Base = u64Base;
4006 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4007 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4008
4009 /* CPL has changed, update IEM before loading rest of segments. */
4010 pVCpu->iem.s.uCpl = uNewCpl;
4011
4012 /*
4013 * Load the data segments for the new task.
4014 */
4015 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4016 if (rcStrict != VINF_SUCCESS)
4017 return rcStrict;
4018 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4019 if (rcStrict != VINF_SUCCESS)
4020 return rcStrict;
4021 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4022 if (rcStrict != VINF_SUCCESS)
4023 return rcStrict;
4024 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4025 if (rcStrict != VINF_SUCCESS)
4026 return rcStrict;
4027
4028 /*
4029 * Load the code segment for the new task.
4030 */
4031 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4032 {
4033 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4034 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4035 }
4036
4037 /* Fetch the descriptor. */
4038 IEMSELDESC DescCS;
4039 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4040 if (rcStrict != VINF_SUCCESS)
4041 {
4042 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4043 return rcStrict;
4044 }
4045
4046 /* CS must be a code segment. */
4047 if ( !DescCS.Legacy.Gen.u1DescType
4048 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4049 {
4050 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4051 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4052 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4053 }
4054
4055 /* For conforming CS, DPL must be less than or equal to the RPL. */
4056 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4057 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4058 {
4059 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4060 DescCS.Legacy.Gen.u2Dpl));
4061 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4062 }
4063
4064 /* For non-conforming CS, DPL must match RPL. */
4065 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4066 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4067 {
4068 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4069 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4070 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4071 }
4072
4073 /* Is it there? */
4074 if (!DescCS.Legacy.Gen.u1Present)
4075 {
4076 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4077 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4078 }
4079
4080 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4081 u64Base = X86DESC_BASE(&DescCS.Legacy);
4082
4083 /* Set the accessed bit before committing the result into CS. */
4084 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4085 {
4086 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4087 if (rcStrict != VINF_SUCCESS)
4088 return rcStrict;
4089 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4090 }
4091
4092 /* Commit CS. */
4093 pCtx->cs.Sel = uNewCS;
4094 pCtx->cs.ValidSel = uNewCS;
4095 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4096 pCtx->cs.u32Limit = cbLimit;
4097 pCtx->cs.u64Base = u64Base;
4098 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4099 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4100 }
4101
4102 /** @todo Debug trap. */
4103 if (fIsNewTSS386 && fNewDebugTrap)
4104 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4105
4106 /*
4107 * Construct the error code masks based on what caused this task switch.
4108 * See Intel Instruction reference for INT.
4109 */
4110 uint16_t uExt;
4111 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4112 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4113 {
4114 uExt = 1;
4115 }
4116 else
4117 uExt = 0;
4118
4119 /*
4120 * Push any error code on to the new stack.
4121 */
4122 if (fFlags & IEM_XCPT_FLAGS_ERR)
4123 {
4124 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4125 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4126 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4127
4128 /* Check that there is sufficient space on the stack. */
4129 /** @todo Factor out segment limit checking for normal/expand down segments
4130 * into a separate function. */
4131 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4132 {
4133 if ( pCtx->esp - 1 > cbLimitSS
4134 || pCtx->esp < cbStackFrame)
4135 {
4136 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4137 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4138 cbStackFrame));
4139 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4140 }
4141 }
4142 else
4143 {
4144 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4145 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4146 {
4147 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4148 cbStackFrame));
4149 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4150 }
4151 }
4152
4153
4154 if (fIsNewTSS386)
4155 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4156 else
4157 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4158 if (rcStrict != VINF_SUCCESS)
4159 {
4160 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
4161 VBOXSTRICTRC_VAL(rcStrict)));
4162 return rcStrict;
4163 }
4164 }
4165
4166 /* Check the new EIP against the new CS limit. */
4167 if (pCtx->eip > pCtx->cs.u32Limit)
4168 {
4169 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4170 pCtx->eip, pCtx->cs.u32Limit));
4171 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4172 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4173 }
4174
4175 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4176 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4177}
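
/*
 * Editor's note: a standalone sketch (not IEM code) of the available/busy
 * toggling done on the GDT TSS descriptors above.  The distinction lives in
 * bit 1 of the 4-bit descriptor type (286 TSS: 1 <-> 3, 386 TSS: 9 <-> 11),
 * so the task switch just masks that one bit rather than rewriting the type.
 * The names below are invented for illustration.
 */
#include <stdint.h>

#define EXAMPLE_TSS_BUSY_BIT  UINT8_C(0x2)   /* bit 1 of the descriptor type nibble */

uint8_t exampleTssTypeSetBusy(uint8_t u4Type)   { return (uint8_t)(u4Type |  EXAMPLE_TSS_BUSY_BIT); }
uint8_t exampleTssTypeClearBusy(uint8_t u4Type) { return (uint8_t)(u4Type & ~EXAMPLE_TSS_BUSY_BIT); }
/* e.g. exampleTssTypeSetBusy(9) == 11: 386 TSS available -> busy. */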
4178
4179
4180/**
4181 * Implements exceptions and interrupts for protected mode.
4182 *
4183 * @returns VBox strict status code.
4184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4185 * @param pCtx The CPU context.
4186 * @param cbInstr The number of bytes to offset rIP by in the return
4187 * address.
4188 * @param u8Vector The interrupt / exception vector number.
4189 * @param fFlags The flags.
4190 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4191 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4192 */
4193IEM_STATIC VBOXSTRICTRC
4194iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4195 PCPUMCTX pCtx,
4196 uint8_t cbInstr,
4197 uint8_t u8Vector,
4198 uint32_t fFlags,
4199 uint16_t uErr,
4200 uint64_t uCr2)
4201{
4202 /*
4203 * Read the IDT entry.
4204 */
4205 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4206 {
4207 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4208 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4209 }
4210 X86DESC Idte;
4211 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4212 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4213 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4214 return rcStrict;
4215 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4216 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4217 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4218
4219 /*
4220 * Check the descriptor type, DPL and such.
4221 * ASSUMES this is done in the same order as described for call-gate calls.
4222 */
4223 if (Idte.Gate.u1DescType)
4224 {
4225 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4226 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4227 }
4228 bool fTaskGate = false;
4229 uint8_t f32BitGate = true;
4230 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4231 switch (Idte.Gate.u4Type)
4232 {
4233 case X86_SEL_TYPE_SYS_UNDEFINED:
4234 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4235 case X86_SEL_TYPE_SYS_LDT:
4236 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4237 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4238 case X86_SEL_TYPE_SYS_UNDEFINED2:
4239 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4240 case X86_SEL_TYPE_SYS_UNDEFINED3:
4241 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4242 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4243 case X86_SEL_TYPE_SYS_UNDEFINED4:
4244 {
4245 /** @todo check what actually happens when the type is wrong...
4246 * esp. call gates. */
4247 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4248 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4249 }
4250
4251 case X86_SEL_TYPE_SYS_286_INT_GATE:
4252 f32BitGate = false;
4253 case X86_SEL_TYPE_SYS_386_INT_GATE:
4254 fEflToClear |= X86_EFL_IF;
4255 break;
4256
4257 case X86_SEL_TYPE_SYS_TASK_GATE:
4258 fTaskGate = true;
4259#ifndef IEM_IMPLEMENTS_TASKSWITCH
4260 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4261#endif
4262 break;
4263
4264 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4265 f32BitGate = false;
4266 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4267 break;
4268
4269 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4270 }
4271
4272 /* Check DPL against CPL if applicable. */
4273 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4274 {
4275 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4276 {
4277 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4278 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4279 }
4280 }
4281
4282 /* Is it there? */
4283 if (!Idte.Gate.u1Present)
4284 {
4285 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4286 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4287 }
4288
4289 /* Is it a task-gate? */
4290 if (fTaskGate)
4291 {
4292 /*
4293 * Construct the error code masks based on what caused this task switch.
4294 * See Intel Instruction reference for INT.
4295 */
4296 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4297 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4298 RTSEL SelTSS = Idte.Gate.u16Sel;
4299
4300 /*
4301 * Fetch the TSS descriptor in the GDT.
4302 */
4303 IEMSELDESC DescTSS;
4304 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4305 if (rcStrict != VINF_SUCCESS)
4306 {
4307 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4308 VBOXSTRICTRC_VAL(rcStrict)));
4309 return rcStrict;
4310 }
4311
4312 /* The TSS descriptor must be a system segment and be available (not busy). */
4313 if ( DescTSS.Legacy.Gen.u1DescType
4314 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4315 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4316 {
4317 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4318 u8Vector, SelTSS, DescTSS.Legacy.au64));
4319 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4320 }
4321
4322 /* The TSS must be present. */
4323 if (!DescTSS.Legacy.Gen.u1Present)
4324 {
4325 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4326 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4327 }
4328
4329 /* Do the actual task switch. */
4330 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4331 }
4332
4333 /* A null CS is bad. */
4334 RTSEL NewCS = Idte.Gate.u16Sel;
4335 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4336 {
4337 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4338 return iemRaiseGeneralProtectionFault0(pVCpu);
4339 }
4340
4341 /* Fetch the descriptor for the new CS. */
4342 IEMSELDESC DescCS;
4343 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4347 return rcStrict;
4348 }
4349
4350 /* Must be a code segment. */
4351 if (!DescCS.Legacy.Gen.u1DescType)
4352 {
4353 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4354 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4355 }
4356 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4357 {
4358 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4359 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4360 }
4361
4362 /* Don't allow lowering the privilege level. */
4363 /** @todo Does the lowering of privileges apply to software interrupts
4364 * only? This has bearings on the more-privileged or
4365 * same-privilege stack behavior further down. A testcase would
4366 * be nice. */
4367 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4368 {
4369 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4370 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4371 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4372 }
4373
4374 /* Make sure the selector is present. */
4375 if (!DescCS.Legacy.Gen.u1Present)
4376 {
4377 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4378 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4379 }
4380
4381 /* Check the new EIP against the new CS limit. */
4382 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4383 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4384 ? Idte.Gate.u16OffsetLow
4385 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4386 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4387 if (uNewEip > cbLimitCS)
4388 {
4389 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4390 u8Vector, uNewEip, cbLimitCS, NewCS));
4391 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4392 }
4393
4394 /* Calc the flag image to push. */
4395 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4396 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4397 fEfl &= ~X86_EFL_RF;
4398 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4399 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4400
4401 /* From V8086 mode only go to CPL 0. */
4402 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4403 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4404 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4405 {
4406 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4407 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4408 }
4409
4410 /*
4411 * If the privilege level changes, we need to get a new stack from the TSS.
4412 * This in turn means validating the new SS and ESP...
4413 */
4414 if (uNewCpl != pVCpu->iem.s.uCpl)
4415 {
4416 RTSEL NewSS;
4417 uint32_t uNewEsp;
4418 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4419 if (rcStrict != VINF_SUCCESS)
4420 return rcStrict;
4421
4422 IEMSELDESC DescSS;
4423 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4424 if (rcStrict != VINF_SUCCESS)
4425 return rcStrict;
4426
4427 /* Check that there is sufficient space for the stack frame. */
4428 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4429 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4430 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4431 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4432
4433 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4434 {
4435 if ( uNewEsp - 1 > cbLimitSS
4436 || uNewEsp < cbStackFrame)
4437 {
4438 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4439 u8Vector, NewSS, uNewEsp, cbStackFrame));
4440 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4441 }
4442 }
4443 else
4444 {
4445 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4446 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4447 {
4448 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4449 u8Vector, NewSS, uNewEsp, cbStackFrame));
4450 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4451 }
4452 }
4453
4454 /*
4455 * Start making changes.
4456 */
4457
4458 /* Set the new CPL so that stack accesses use it. */
4459 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4460 pVCpu->iem.s.uCpl = uNewCpl;
4461
4462 /* Create the stack frame. */
4463 RTPTRUNION uStackFrame;
4464 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4465 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4466 if (rcStrict != VINF_SUCCESS)
4467 return rcStrict;
4468 void * const pvStackFrame = uStackFrame.pv;
4469 if (f32BitGate)
4470 {
4471 if (fFlags & IEM_XCPT_FLAGS_ERR)
4472 *uStackFrame.pu32++ = uErr;
4473 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4474 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4475 uStackFrame.pu32[2] = fEfl;
4476 uStackFrame.pu32[3] = pCtx->esp;
4477 uStackFrame.pu32[4] = pCtx->ss.Sel;
4478 if (fEfl & X86_EFL_VM)
4479 {
4480 uStackFrame.pu32[1] = pCtx->cs.Sel;
4481 uStackFrame.pu32[5] = pCtx->es.Sel;
4482 uStackFrame.pu32[6] = pCtx->ds.Sel;
4483 uStackFrame.pu32[7] = pCtx->fs.Sel;
4484 uStackFrame.pu32[8] = pCtx->gs.Sel;
4485 }
4486 }
4487 else
4488 {
4489 if (fFlags & IEM_XCPT_FLAGS_ERR)
4490 *uStackFrame.pu16++ = uErr;
4491 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4492 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4493 uStackFrame.pu16[2] = fEfl;
4494 uStackFrame.pu16[3] = pCtx->sp;
4495 uStackFrame.pu16[4] = pCtx->ss.Sel;
4496 if (fEfl & X86_EFL_VM)
4497 {
4498 uStackFrame.pu16[1] = pCtx->cs.Sel;
4499 uStackFrame.pu16[5] = pCtx->es.Sel;
4500 uStackFrame.pu16[6] = pCtx->ds.Sel;
4501 uStackFrame.pu16[7] = pCtx->fs.Sel;
4502 uStackFrame.pu16[8] = pCtx->gs.Sel;
4503 }
4504 }
4505 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4506 if (rcStrict != VINF_SUCCESS)
4507 return rcStrict;
4508
4509 /* Mark the selectors 'accessed' (hope this is the correct time). */
4510 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4511 * after pushing the stack frame? (Write protect the gdt + stack to
4512 * find out.) */
4513 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4514 {
4515 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4516 if (rcStrict != VINF_SUCCESS)
4517 return rcStrict;
4518 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4519 }
4520
4521 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4522 {
4523 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4524 if (rcStrict != VINF_SUCCESS)
4525 return rcStrict;
4526 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4527 }
4528
4529 /*
4530 * Start committing the register changes (joins with the DPL=CPL branch).
4531 */
4532 pCtx->ss.Sel = NewSS;
4533 pCtx->ss.ValidSel = NewSS;
4534 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4535 pCtx->ss.u32Limit = cbLimitSS;
4536 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4537 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4538 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4539 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4540 * SP is loaded).
4541 * Need to check the other combinations too:
4542 * - 16-bit TSS, 32-bit handler
4543 * - 32-bit TSS, 16-bit handler */
4544 if (!pCtx->ss.Attr.n.u1DefBig)
4545 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4546 else
4547 pCtx->rsp = uNewEsp - cbStackFrame;
4548
4549 if (fEfl & X86_EFL_VM)
4550 {
4551 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4552 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4553 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4554 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4555 }
4556 }
4557 /*
4558 * Same privilege, no stack change and smaller stack frame.
4559 */
4560 else
4561 {
4562 uint64_t uNewRsp;
4563 RTPTRUNION uStackFrame;
4564 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4565 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4566 if (rcStrict != VINF_SUCCESS)
4567 return rcStrict;
4568 void * const pvStackFrame = uStackFrame.pv;
4569
4570 if (f32BitGate)
4571 {
4572 if (fFlags & IEM_XCPT_FLAGS_ERR)
4573 *uStackFrame.pu32++ = uErr;
4574 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4575 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4576 uStackFrame.pu32[2] = fEfl;
4577 }
4578 else
4579 {
4580 if (fFlags & IEM_XCPT_FLAGS_ERR)
4581 *uStackFrame.pu16++ = uErr;
4582 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4583 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4584 uStackFrame.pu16[2] = fEfl;
4585 }
4586 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4587 if (rcStrict != VINF_SUCCESS)
4588 return rcStrict;
4589
4590 /* Mark the CS selector as 'accessed'. */
4591 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4592 {
4593 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4594 if (rcStrict != VINF_SUCCESS)
4595 return rcStrict;
4596 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4597 }
4598
4599 /*
4600 * Start committing the register changes (joins with the other branch).
4601 */
4602 pCtx->rsp = uNewRsp;
4603 }
4604
4605 /* ... register committing continues. */
4606 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4607 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4608 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4609 pCtx->cs.u32Limit = cbLimitCS;
4610 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4611 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4612
4613 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4614 fEfl &= ~fEflToClear;
4615 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4616
4617 if (fFlags & IEM_XCPT_FLAGS_CR2)
4618 pCtx->cr2 = uCr2;
4619
4620 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4621 iemRaiseXcptAdjustState(pCtx, u8Vector);
4622
4623 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4624}
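
/*
 * Editor's note: a standalone sketch (not IEM code) of where expressions such
 * as "(fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate" above come from.
 * The frame always holds IP, CS and FLAGS; an error code adds one slot, a
 * privilege change adds the old SS:SP, and delivery out of V8086 mode (which
 * always switches to ring 0) additionally pushes GS, FS, DS and ES.  Each slot
 * is 2 bytes through a 16-bit gate and 4 bytes through a 32-bit gate.  The
 * function name is invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

uint8_t exampleXcptFrameSize(bool f32BitGate, bool fErrCd, bool fPrivChange, bool fFromV86)
{
    unsigned cSlots = 3;            /* IP, CS, FLAGS */
    if (fErrCd)      cSlots += 1;   /* error code */
    if (fPrivChange) cSlots += 2;   /* old SS, old SP */
    if (fFromV86)    cSlots += 4;   /* GS, FS, DS, ES */
    return (uint8_t)((cSlots * 2) << (f32BitGate ? 1 : 0));
}
/* exampleXcptFrameSize(true, true, true, false) == 24, matching 12 << 1 above. */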
4625
4626
4627/**
4628 * Implements exceptions and interrupts for long mode.
4629 *
4630 * @returns VBox strict status code.
4631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4632 * @param pCtx The CPU context.
4633 * @param cbInstr The number of bytes to offset rIP by in the return
4634 * address.
4635 * @param u8Vector The interrupt / exception vector number.
4636 * @param fFlags The flags.
4637 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4638 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4639 */
4640IEM_STATIC VBOXSTRICTRC
4641iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4642 PCPUMCTX pCtx,
4643 uint8_t cbInstr,
4644 uint8_t u8Vector,
4645 uint32_t fFlags,
4646 uint16_t uErr,
4647 uint64_t uCr2)
4648{
4649 /*
4650 * Read the IDT entry.
4651 */
4652 uint16_t offIdt = (uint16_t)u8Vector << 4;
4653 if (pCtx->idtr.cbIdt < offIdt + 7)
4654 {
4655 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4656 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4657 }
4658 X86DESC64 Idte;
4659 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4660 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4661 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4662 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4663 return rcStrict;
4664 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4665 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4666 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4667
4668 /*
4669 * Check the descriptor type, DPL and such.
4670 * ASSUMES this is done in the same order as described for call-gate calls.
4671 */
4672 if (Idte.Gate.u1DescType)
4673 {
4674 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4675 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4676 }
4677 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4678 switch (Idte.Gate.u4Type)
4679 {
4680 case AMD64_SEL_TYPE_SYS_INT_GATE:
4681 fEflToClear |= X86_EFL_IF;
4682 break;
4683 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4684 break;
4685
4686 default:
4687 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4688 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4689 }
4690
4691 /* Check DPL against CPL if applicable. */
4692 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4693 {
4694 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4695 {
4696 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4697 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4698 }
4699 }
4700
4701 /* Is it there? */
4702 if (!Idte.Gate.u1Present)
4703 {
4704 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4705 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4706 }
4707
4708 /* A null CS is bad. */
4709 RTSEL NewCS = Idte.Gate.u16Sel;
4710 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4711 {
4712 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4713 return iemRaiseGeneralProtectionFault0(pVCpu);
4714 }
4715
4716 /* Fetch the descriptor for the new CS. */
4717 IEMSELDESC DescCS;
4718 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4719 if (rcStrict != VINF_SUCCESS)
4720 {
4721 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4722 return rcStrict;
4723 }
4724
4725 /* Must be a 64-bit code segment. */
4726 if (!DescCS.Long.Gen.u1DescType)
4727 {
4728 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4729 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4730 }
4731 if ( !DescCS.Long.Gen.u1Long
4732 || DescCS.Long.Gen.u1DefBig
4733 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4734 {
4735 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4736 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4737 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4738 }
4739
4740 /* Don't allow lowering the privilege level. For non-conforming CS
4741 selectors, the CS.DPL sets the privilege level the trap/interrupt
4742 handler runs at. For conforming CS selectors, the CPL remains
4743 unchanged, but the CS.DPL must be <= CPL. */
4744 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4745 * when CPU in Ring-0. Result \#GP? */
4746 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4747 {
4748 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4749 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4750 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4751 }
4752
4753
4754 /* Make sure the selector is present. */
4755 if (!DescCS.Legacy.Gen.u1Present)
4756 {
4757 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4758 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4759 }
4760
4761 /* Check that the new RIP is canonical. */
4762 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4763 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4764 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4765 if (!IEM_IS_CANONICAL(uNewRip))
4766 {
4767 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4768 return iemRaiseGeneralProtectionFault0(pVCpu);
4769 }
4770
4771 /*
4772 * If the privilege level changes or if the IST isn't zero, we need to get
4773 * a new stack from the TSS.
4774 */
4775 uint64_t uNewRsp;
4776 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4777 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4778 if ( uNewCpl != pVCpu->iem.s.uCpl
4779 || Idte.Gate.u3IST != 0)
4780 {
4781 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4782 if (rcStrict != VINF_SUCCESS)
4783 return rcStrict;
4784 }
4785 else
4786 uNewRsp = pCtx->rsp;
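        /* In long mode the CPU aligns RSP to a 16-byte boundary before pushing the exception/interrupt frame. */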
4787 uNewRsp &= ~(uint64_t)0xf;
4788
4789 /*
4790 * Calc the flag image to push.
4791 */
4792 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4793 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4794 fEfl &= ~X86_EFL_RF;
4795 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4796 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4797
4798 /*
4799 * Start making changes.
4800 */
4801 /* Set the new CPL so that stack accesses use it. */
4802 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4803 pVCpu->iem.s.uCpl = uNewCpl;
4804
4805 /* Create the stack frame. */
4806 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4807 RTPTRUNION uStackFrame;
4808 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4809 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4810 if (rcStrict != VINF_SUCCESS)
4811 return rcStrict;
4812 void * const pvStackFrame = uStackFrame.pv;
4813
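    /* 64-bit frame layout, ascending addresses: [error code when IEM_XCPT_FLAGS_ERR], RIP, CS, RFLAGS, old RSP, old SS. */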
4814 if (fFlags & IEM_XCPT_FLAGS_ERR)
4815 *uStackFrame.pu64++ = uErr;
4816 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4817 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4818 uStackFrame.pu64[2] = fEfl;
4819 uStackFrame.pu64[3] = pCtx->rsp;
4820 uStackFrame.pu64[4] = pCtx->ss.Sel;
4821 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4822 if (rcStrict != VINF_SUCCESS)
4823 return rcStrict;
4824
4825    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
4826    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4827 * after pushing the stack frame? (Write protect the gdt + stack to
4828 * find out.) */
4829 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4830 {
4831 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4832 if (rcStrict != VINF_SUCCESS)
4833 return rcStrict;
4834 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4835 }
4836
4837 /*
4838     * Start committing the register changes.
4839 */
4840    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4841     *        hidden registers when interrupting 32-bit or 16-bit code! */
4842 if (uNewCpl != uOldCpl)
4843 {
4844 pCtx->ss.Sel = 0 | uNewCpl;
4845 pCtx->ss.ValidSel = 0 | uNewCpl;
4846 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4847 pCtx->ss.u32Limit = UINT32_MAX;
4848 pCtx->ss.u64Base = 0;
4849 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4850 }
4851 pCtx->rsp = uNewRsp - cbStackFrame;
4852 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4853 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4854 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4855 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4856 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4857 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4858 pCtx->rip = uNewRip;
4859
4860 fEfl &= ~fEflToClear;
4861 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4862
4863 if (fFlags & IEM_XCPT_FLAGS_CR2)
4864 pCtx->cr2 = uCr2;
4865
4866 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4867 iemRaiseXcptAdjustState(pCtx, u8Vector);
4868
4869 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4870}
4871
4872
4873/**
4874 * Implements exceptions and interrupts.
4875 *
4876 * All exceptions and interrupts go through this function!
4877 *
4878 * @returns VBox strict status code.
4879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4880 * @param cbInstr The number of bytes to offset rIP by in the return
4881 * address.
4882 * @param u8Vector The interrupt / exception vector number.
4883 * @param fFlags The flags.
4884 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4885 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4886 */
4887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
4888iemRaiseXcptOrInt(PVMCPU pVCpu,
4889 uint8_t cbInstr,
4890 uint8_t u8Vector,
4891 uint32_t fFlags,
4892 uint16_t uErr,
4893 uint64_t uCr2)
4894{
4895 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4896#ifdef IN_RING0
4897 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
4898 AssertRCReturn(rc, rc);
4899#endif
4900
4901#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4902 /*
4903 * Flush prefetch buffer
4904 */
4905 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4906#endif
4907
4908 /*
4909 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4910 */
4911 if ( pCtx->eflags.Bits.u1VM
4912 && pCtx->eflags.Bits.u2IOPL != 3
4913 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4914 && (pCtx->cr0 & X86_CR0_PE) )
4915 {
4916 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4917 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4918 u8Vector = X86_XCPT_GP;
4919 uErr = 0;
4920 }
4921#ifdef DBGFTRACE_ENABLED
4922 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4923 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4924 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
4925#endif
4926
4927 /*
4928 * Do recursion accounting.
4929 */
4930 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4931 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4932 if (pVCpu->iem.s.cXcptRecursions == 0)
4933 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4934 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4935 else
4936 {
4937 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4938 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4939
4940        /** @todo double and triple faults. */
4941 if (pVCpu->iem.s.cXcptRecursions >= 3)
4942 {
4943#ifdef DEBUG_bird
4944 AssertFailed();
4945#endif
4946 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4947 }
4948
4949 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4950 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4951 {
4952 ....
4953 } */
4954 }
4955 pVCpu->iem.s.cXcptRecursions++;
4956 pVCpu->iem.s.uCurXcpt = u8Vector;
4957 pVCpu->iem.s.fCurXcpt = fFlags;
4958
4959 /*
4960 * Extensive logging.
4961 */
4962#if defined(LOG_ENABLED) && defined(IN_RING3)
4963 if (LogIs3Enabled())
4964 {
4965 PVM pVM = pVCpu->CTX_SUFF(pVM);
4966 char szRegs[4096];
4967 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4968 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4969 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4970 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4971 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4972 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4973 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4974 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4975 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4976 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4977 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4978 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4979 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4980 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4981 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4982 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4983 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4984 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4985 " efer=%016VR{efer}\n"
4986 " pat=%016VR{pat}\n"
4987 " sf_mask=%016VR{sf_mask}\n"
4988 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4989 " lstar=%016VR{lstar}\n"
4990 " star=%016VR{star} cstar=%016VR{cstar}\n"
4991 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4992 );
4993
4994 char szInstr[256];
4995 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4996 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4997 szInstr, sizeof(szInstr), NULL);
4998 Log3(("%s%s\n", szRegs, szInstr));
4999 }
5000#endif /* LOG_ENABLED */
5001
5002 /*
5003 * Call the mode specific worker function.
5004 */
5005 VBOXSTRICTRC rcStrict;
5006 if (!(pCtx->cr0 & X86_CR0_PE))
5007 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5008 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5009 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5010 else
5011 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5012
5013 /* Flush the prefetch buffer. */
5014#ifdef IEM_WITH_CODE_TLB
5015 pVCpu->iem.s.pbInstrBuf = NULL;
5016#else
5017 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5018#endif
5019
5020 /*
5021 * Unwind.
5022 */
5023 pVCpu->iem.s.cXcptRecursions--;
5024 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5025 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5026 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5027 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5028 return rcStrict;
5029}
5030
5031#ifdef IEM_WITH_SETJMP
5032/**
5033 * See iemRaiseXcptOrInt. Will not return.
5034 */
5035IEM_STATIC DECL_NO_RETURN(void)
5036iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5037 uint8_t cbInstr,
5038 uint8_t u8Vector,
5039 uint32_t fFlags,
5040 uint16_t uErr,
5041 uint64_t uCr2)
5042{
5043 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5044 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5045}
5046#endif
5047
5048
5049/** \#DE - 00. */
5050DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5051{
5052 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5053}
5054
5055
5056/** \#DB - 01.
5057 * @note This automatically clears DR7.GD. */
5058DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5059{
5060 /** @todo set/clear RF. */
5061 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5062 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5063}
5064
5065
5066/** \#UD - 06. */
5067DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5068{
5069 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5070}
5071
5072
5073/** \#NM - 07. */
5074DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5075{
5076 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5077}
5078
5079
5080/** \#TS(err) - 0a. */
5081DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5082{
5083 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5084}
5085
5086
5087/** \#TS(tr) - 0a. */
5088DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5089{
5090 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5091 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5092}
5093
5094
5095/** \#TS(0) - 0a. */
5096DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5097{
5098 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5099 0, 0);
5100}
5101
5102
5103/** \#TS(err) - 0a. */
5104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5105{
5106 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5107 uSel & X86_SEL_MASK_OFF_RPL, 0);
5108}
5109
5110
5111/** \#NP(err) - 0b. */
5112DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5113{
5114 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5115}
5116
5117
5118/** \#NP(seg) - 0b. */
5119DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5120{
5121 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5122 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5123}
5124
5125
5126/** \#NP(sel) - 0b. */
5127DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5128{
5129 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5130 uSel & ~X86_SEL_RPL, 0);
5131}
5132
5133
5134/** \#SS(seg) - 0c. */
5135DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5136{
5137 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5138 uSel & ~X86_SEL_RPL, 0);
5139}
5140
5141
5142/** \#SS(err) - 0c. */
5143DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5144{
5145 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5146}
5147
5148
5149/** \#GP(n) - 0d. */
5150DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5151{
5152 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5153}
5154
5155
5156/** \#GP(0) - 0d. */
5157DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5158{
5159 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5160}
5161
5162#ifdef IEM_WITH_SETJMP
5163/** \#GP(0) - 0d. */
5164DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5165{
5166 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5167}
5168#endif
5169
5170
5171/** \#GP(sel) - 0d. */
5172DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5173{
5174 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5175 Sel & ~X86_SEL_RPL, 0);
5176}
5177
5178
5179/** \#GP(0) - 0d. */
5180DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5181{
5182 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5183}
5184
5185
5186/** \#GP(sel) - 0d. */
5187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5188{
5189 NOREF(iSegReg); NOREF(fAccess);
5190 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5191 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5192}
5193
5194#ifdef IEM_WITH_SETJMP
5195/** \#GP(sel) - 0d, longjmp. */
5196DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5197{
5198 NOREF(iSegReg); NOREF(fAccess);
5199 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5200 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5201}
5202#endif
5203
5204/** \#GP(sel) - 0d. */
5205DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5206{
5207 NOREF(Sel);
5208 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5209}
5210
5211#ifdef IEM_WITH_SETJMP
5212/** \#GP(sel) - 0d, longjmp. */
5213DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5214{
5215 NOREF(Sel);
5216 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5217}
5218#endif
5219
5220
5221/** \#GP(sel) - 0d. */
5222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5223{
5224 NOREF(iSegReg); NOREF(fAccess);
5225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5226}
5227
5228#ifdef IEM_WITH_SETJMP
5229/** \#GP(sel) - 0d, longjmp. */
5230DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5231 uint32_t fAccess)
5232{
5233 NOREF(iSegReg); NOREF(fAccess);
5234 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5235}
5236#endif
5237
5238
5239/** \#PF(n) - 0e. */
5240DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5241{
5242 uint16_t uErr;
5243 switch (rc)
5244 {
5245 case VERR_PAGE_NOT_PRESENT:
5246 case VERR_PAGE_TABLE_NOT_PRESENT:
5247 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5248 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5249 uErr = 0;
5250 break;
5251
5252 default:
5253 AssertMsgFailed(("%Rrc\n", rc));
5254 case VERR_ACCESS_DENIED:
5255 uErr = X86_TRAP_PF_P;
5256 break;
5257
5258 /** @todo reserved */
5259 }
5260
5261 if (pVCpu->iem.s.uCpl == 3)
5262 uErr |= X86_TRAP_PF_US;
5263
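    /* Only report the instruction-fetch (I/D) bit when NX paging is in effect (CR4.PAE + EFER.NXE), matching what the check below requires. */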
5264 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5265 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5266 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5267 uErr |= X86_TRAP_PF_ID;
5268
5269#if 0 /* This is so much non-sense, really. Why was it done like that? */
5270 /* Note! RW access callers reporting a WRITE protection fault, will clear
5271 the READ flag before calling. So, read-modify-write accesses (RW)
5272 can safely be reported as READ faults. */
5273 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5274 uErr |= X86_TRAP_PF_RW;
5275#else
5276 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5277 {
5278 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5279 uErr |= X86_TRAP_PF_RW;
5280 }
5281#endif
5282
5283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5284 uErr, GCPtrWhere);
5285}
5286
5287
5288/** \#MF(0) - 10. */
5289DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5290{
5291 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5292}
5293
5294
5295/** \#AC(0) - 11. */
5296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5297{
5298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5299}
5300
5301
5302/**
5303 * Macro for calling iemCImplRaiseDivideError().
5304 *
5305 * This enables us to add/remove arguments and force different levels of
5306 * inlining as we wish.
5307 *
5308 * @return Strict VBox status code.
5309 */
5310#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5311IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5312{
5313 NOREF(cbInstr);
5314 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5315}
5316
5317
5318/**
5319 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5320 *
5321 * This enables us to add/remove arguments and force different levels of
5322 * inlining as we wish.
5323 *
5324 * @return Strict VBox status code.
5325 */
5326#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5327IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5328{
5329 NOREF(cbInstr);
5330 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5331}
5332
5333
5334/**
5335 * Macro for calling iemCImplRaiseInvalidOpcode().
5336 *
5337 * This enables us to add/remove arguments and force different levels of
5338 * inlining as we wish.
5339 *
5340 * @return Strict VBox status code.
5341 */
5342#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5343IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5344{
5345 NOREF(cbInstr);
5346 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5347}
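/*
 * Illustrative sketch only (not part of the original file): an opcode decoder
 * typically just returns one of the IEMOP_RAISE_* macros above when it rejects
 * an encoding.  The function name below is made up for the example.
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleInvalid)
{
    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)    /* e.g. reject a LOCK prefix */
        return IEMOP_RAISE_INVALID_LOCK_PREFIX();
    return IEMOP_RAISE_INVALID_OPCODE();
}
#endif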
5348
5349
5350/** @} */
5351
5352
5353/*
5354 *
5355 * Helper routines.
5356 * Helper routines.
5357 * Helper routines.
5358 *
5359 */
5360
5361/**
5362 * Recalculates the effective operand size.
5363 *
5364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5365 */
5366IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5367{
5368 switch (pVCpu->iem.s.enmCpuMode)
5369 {
5370 case IEMMODE_16BIT:
5371 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5372 break;
5373 case IEMMODE_32BIT:
5374 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5375 break;
5376 case IEMMODE_64BIT:
5377 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5378 {
5379 case 0:
5380 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5381 break;
5382 case IEM_OP_PRF_SIZE_OP:
5383 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5384 break;
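            /* REX.W forces 64-bit operand size and takes precedence over a 0x66 operand-size prefix. */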
5385 case IEM_OP_PRF_SIZE_REX_W:
5386 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5387 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5388 break;
5389 }
5390 break;
5391 default:
5392 AssertFailed();
5393 }
5394}
5395
5396
5397/**
5398 * Sets the default operand size to 64-bit and recalculates the effective
5399 * operand size.
5400 *
5401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5402 */
5403IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5404{
5405 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5406 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5407 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5408 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5409 else
5410 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5411}
5412
5413
5414/*
5415 *
5416 * Common opcode decoders.
5417 * Common opcode decoders.
5418 * Common opcode decoders.
5419 *
5420 */
5421//#include <iprt/mem.h>
5422
5423/**
5424 * Used to add extra details about a stub case.
5425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5426 */
5427IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5428{
5429#if defined(LOG_ENABLED) && defined(IN_RING3)
5430 PVM pVM = pVCpu->CTX_SUFF(pVM);
5431 char szRegs[4096];
5432 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5433 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5434 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5435 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5436 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5437 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5438 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5439 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5440 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5441 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5442 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5443 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5444 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5445 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5446 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5447 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5448 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5449 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5450 " efer=%016VR{efer}\n"
5451 " pat=%016VR{pat}\n"
5452 " sf_mask=%016VR{sf_mask}\n"
5453 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5454 " lstar=%016VR{lstar}\n"
5455 " star=%016VR{star} cstar=%016VR{cstar}\n"
5456 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5457 );
5458
5459 char szInstr[256];
5460 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5461 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5462 szInstr, sizeof(szInstr), NULL);
5463
5464 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5465#else
5466    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5467#endif
5468}
5469
5470/**
5471 * Complains about a stub.
5472 *
5473 * Providing two versions of this macro, one for daily use and one for use when
5474 * working on IEM.
5475 */
5476#if 0
5477# define IEMOP_BITCH_ABOUT_STUB() \
5478 do { \
5479 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5480 iemOpStubMsg2(pVCpu); \
5481 RTAssertPanic(); \
5482 } while (0)
5483#else
5484# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5485#endif
5486
5487/** Stubs an opcode. */
5488#define FNIEMOP_STUB(a_Name) \
5489 FNIEMOP_DEF(a_Name) \
5490 { \
5491 IEMOP_BITCH_ABOUT_STUB(); \
5492 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5493 } \
5494 typedef int ignore_semicolon
5495
5496/** Stubs an opcode. */
5497#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5498 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5499 { \
5500 IEMOP_BITCH_ABOUT_STUB(); \
5501 NOREF(a_Name0); \
5502 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5503 } \
5504 typedef int ignore_semicolon
5505
5506/** Stubs an opcode which currently should raise \#UD. */
5507#define FNIEMOP_UD_STUB(a_Name) \
5508 FNIEMOP_DEF(a_Name) \
5509 { \
5510 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5511 return IEMOP_RAISE_INVALID_OPCODE(); \
5512 } \
5513 typedef int ignore_semicolon
5514
5515/** Stubs an opcode which currently should raise \#UD. */
5516#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5517 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5518 { \
5519 NOREF(a_Name0); \
5520 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5521 return IEMOP_RAISE_INVALID_OPCODE(); \
5522 } \
5523 typedef int ignore_semicolon
5524
5525
5526
5527/** @name Register Access.
5528 * @{
5529 */
5530
5531/**
5532 * Gets a reference (pointer) to the specified hidden segment register.
5533 *
5534 * @returns Hidden register reference.
5535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5536 * @param iSegReg The segment register.
5537 */
5538IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5539{
5540 Assert(iSegReg < X86_SREG_COUNT);
5541 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5542 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5543
5544#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5545 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5546 { /* likely */ }
5547 else
5548 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5549#else
5550 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5551#endif
5552 return pSReg;
5553}
5554
5555
5556/**
5557 * Ensures that the given hidden segment register is up to date.
5558 *
5559 * @returns Hidden register reference.
5560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5561 * @param pSReg The segment register.
5562 */
5563IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5564{
5565#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5566 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5567 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5568#else
5569 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5570 NOREF(pVCpu);
5571#endif
5572 return pSReg;
5573}
5574
5575
5576/**
5577 * Gets a reference (pointer) to the specified segment register (the selector
5578 * value).
5579 *
5580 * @returns Pointer to the selector variable.
5581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5582 * @param iSegReg The segment register.
5583 */
5584DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5585{
5586 Assert(iSegReg < X86_SREG_COUNT);
5587 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5588 return &pCtx->aSRegs[iSegReg].Sel;
5589}
5590
5591
5592/**
5593 * Fetches the selector value of a segment register.
5594 *
5595 * @returns The selector value.
5596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5597 * @param iSegReg The segment register.
5598 */
5599DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5600{
5601 Assert(iSegReg < X86_SREG_COUNT);
5602 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5603}
5604
5605
5606/**
5607 * Gets a reference (pointer) to the specified general purpose register.
5608 *
5609 * @returns Register reference.
5610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5611 * @param iReg The general purpose register.
5612 */
5613DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5614{
5615 Assert(iReg < 16);
5616 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5617 return &pCtx->aGRegs[iReg];
5618}
5619
5620
5621/**
5622 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5623 *
5624 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5625 *
5626 * @returns Register reference.
5627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5628 * @param iReg The register.
5629 */
5630DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5631{
5632 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5633 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5634 {
5635 Assert(iReg < 16);
5636 return &pCtx->aGRegs[iReg].u8;
5637 }
5638 /* high 8-bit register. */
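    /* AH, CH, DH and BH map to bits 15:8 of rAX/rCX/rDX/rBX and are only encodable when no REX prefix is present. */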
5639 Assert(iReg < 8);
5640 return &pCtx->aGRegs[iReg & 3].bHi;
5641}
5642
5643
5644/**
5645 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5646 *
5647 * @returns Register reference.
5648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5649 * @param iReg The register.
5650 */
5651DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5652{
5653 Assert(iReg < 16);
5654 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5655 return &pCtx->aGRegs[iReg].u16;
5656}
5657
5658
5659/**
5660 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5661 *
5662 * @returns Register reference.
5663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5664 * @param iReg The register.
5665 */
5666DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5667{
5668 Assert(iReg < 16);
5669 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5670 return &pCtx->aGRegs[iReg].u32;
5671}
5672
5673
5674/**
5675 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5676 *
5677 * @returns Register reference.
5678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5679 * @param iReg The register.
5680 */
5681DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5682{
5683    Assert(iReg < 16);
5684 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5685 return &pCtx->aGRegs[iReg].u64;
5686}
5687
5688
5689/**
5690 * Fetches the value of an 8-bit general purpose register.
5691 *
5692 * @returns The register value.
5693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5694 * @param iReg The register.
5695 */
5696DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5697{
5698 return *iemGRegRefU8(pVCpu, iReg);
5699}
5700
5701
5702/**
5703 * Fetches the value of a 16-bit general purpose register.
5704 *
5705 * @returns The register value.
5706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5707 * @param iReg The register.
5708 */
5709DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5710{
5711 Assert(iReg < 16);
5712 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5713}
5714
5715
5716/**
5717 * Fetches the value of a 32-bit general purpose register.
5718 *
5719 * @returns The register value.
5720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5721 * @param iReg The register.
5722 */
5723DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5724{
5725 Assert(iReg < 16);
5726 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5727}
5728
5729
5730/**
5731 * Fetches the value of a 64-bit general purpose register.
5732 *
5733 * @returns The register value.
5734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5735 * @param iReg The register.
5736 */
5737DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5738{
5739 Assert(iReg < 16);
5740 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5741}
5742
5743
5744/**
5745 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5746 *
5747 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5748 * segment limit.
5749 *
5750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5751 * @param offNextInstr The offset of the next instruction.
5752 */
5753IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5754{
5755 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5756 switch (pVCpu->iem.s.enmEffOpSize)
5757 {
5758 case IEMMODE_16BIT:
5759 {
5760 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5761 if ( uNewIp > pCtx->cs.u32Limit
5762 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5763 return iemRaiseGeneralProtectionFault0(pVCpu);
5764 pCtx->rip = uNewIp;
5765 break;
5766 }
5767
5768 case IEMMODE_32BIT:
5769 {
5770 Assert(pCtx->rip <= UINT32_MAX);
5771 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5772
5773 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5774 if (uNewEip > pCtx->cs.u32Limit)
5775 return iemRaiseGeneralProtectionFault0(pVCpu);
5776 pCtx->rip = uNewEip;
5777 break;
5778 }
5779
5780 case IEMMODE_64BIT:
5781 {
5782 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5783
5784 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5785 if (!IEM_IS_CANONICAL(uNewRip))
5786 return iemRaiseGeneralProtectionFault0(pVCpu);
5787 pCtx->rip = uNewRip;
5788 break;
5789 }
5790
5791 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5792 }
5793
5794 pCtx->eflags.Bits.u1RF = 0;
5795
5796#ifndef IEM_WITH_CODE_TLB
5797 /* Flush the prefetch buffer. */
5798 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5799#endif
5800
5801 return VINF_SUCCESS;
5802}
5803
5804
5805/**
5806 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5807 *
5808 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5809 * segment limit.
5810 *
5811 * @returns Strict VBox status code.
5812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5813 * @param offNextInstr The offset of the next instruction.
5814 */
5815IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5816{
5817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5818 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5819
5820 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5821 if ( uNewIp > pCtx->cs.u32Limit
5822 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5823 return iemRaiseGeneralProtectionFault0(pVCpu);
5824 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5825 pCtx->rip = uNewIp;
5826 pCtx->eflags.Bits.u1RF = 0;
5827
5828#ifndef IEM_WITH_CODE_TLB
5829 /* Flush the prefetch buffer. */
5830 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5831#endif
5832
5833 return VINF_SUCCESS;
5834}
5835
5836
5837/**
5838 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5839 *
5840 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5841 * segment limit.
5842 *
5843 * @returns Strict VBox status code.
5844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5845 * @param offNextInstr The offset of the next instruction.
5846 */
5847IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5848{
5849 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5850 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5851
5852 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5853 {
5854 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5855
5856 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5857 if (uNewEip > pCtx->cs.u32Limit)
5858 return iemRaiseGeneralProtectionFault0(pVCpu);
5859 pCtx->rip = uNewEip;
5860 }
5861 else
5862 {
5863 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5864
5865 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5866 if (!IEM_IS_CANONICAL(uNewRip))
5867 return iemRaiseGeneralProtectionFault0(pVCpu);
5868 pCtx->rip = uNewRip;
5869 }
5870 pCtx->eflags.Bits.u1RF = 0;
5871
5872#ifndef IEM_WITH_CODE_TLB
5873 /* Flush the prefetch buffer. */
5874 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5875#endif
5876
5877 return VINF_SUCCESS;
5878}
5879
5880
5881/**
5882 * Performs a near jump to the specified address.
5883 *
5884 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5885 * segment limit.
5886 *
5887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5888 * @param uNewRip The new RIP value.
5889 */
5890IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
5891{
5892 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5893 switch (pVCpu->iem.s.enmEffOpSize)
5894 {
5895 case IEMMODE_16BIT:
5896 {
5897 Assert(uNewRip <= UINT16_MAX);
5898 if ( uNewRip > pCtx->cs.u32Limit
5899 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5900 return iemRaiseGeneralProtectionFault0(pVCpu);
5901 /** @todo Test 16-bit jump in 64-bit mode. */
5902 pCtx->rip = uNewRip;
5903 break;
5904 }
5905
5906 case IEMMODE_32BIT:
5907 {
5908 Assert(uNewRip <= UINT32_MAX);
5909 Assert(pCtx->rip <= UINT32_MAX);
5910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5911
5912 if (uNewRip > pCtx->cs.u32Limit)
5913 return iemRaiseGeneralProtectionFault0(pVCpu);
5914 pCtx->rip = uNewRip;
5915 break;
5916 }
5917
5918 case IEMMODE_64BIT:
5919 {
5920 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5921
5922 if (!IEM_IS_CANONICAL(uNewRip))
5923 return iemRaiseGeneralProtectionFault0(pVCpu);
5924 pCtx->rip = uNewRip;
5925 break;
5926 }
5927
5928 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5929 }
5930
5931 pCtx->eflags.Bits.u1RF = 0;
5932
5933#ifndef IEM_WITH_CODE_TLB
5934 /* Flush the prefetch buffer. */
5935 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5936#endif
5937
5938 return VINF_SUCCESS;
5939}
5940
5941
5942/**
5943 * Get the address of the top of the stack.
5944 *
5945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5946 * @param pCtx The CPU context from which SP/ESP/RSP should be
5947 * read.
5948 */
5949DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
5950{
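    /* Stack width rule: 64-bit mode always uses the full RSP; otherwise SS.D selects ESP (big) vs. SP. */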
5951 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5952 return pCtx->rsp;
5953 if (pCtx->ss.Attr.n.u1DefBig)
5954 return pCtx->esp;
5955 return pCtx->sp;
5956}
5957
5958
5959/**
5960 * Updates the RIP/EIP/IP to point to the next instruction.
5961 *
5962 * This function leaves the EFLAGS.RF flag alone.
5963 *
5964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5965 * @param cbInstr The number of bytes to add.
5966 */
5967IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
5968{
5969 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5970 switch (pVCpu->iem.s.enmCpuMode)
5971 {
5972 case IEMMODE_16BIT:
5973 Assert(pCtx->rip <= UINT16_MAX);
5974 pCtx->eip += cbInstr;
5975 pCtx->eip &= UINT32_C(0xffff);
5976 break;
5977
5978 case IEMMODE_32BIT:
5979 pCtx->eip += cbInstr;
5980 Assert(pCtx->rip <= UINT32_MAX);
5981 break;
5982
5983 case IEMMODE_64BIT:
5984 pCtx->rip += cbInstr;
5985 break;
5986 default: AssertFailed();
5987 }
5988}
5989
5990
5991#if 0
5992/**
5993 * Updates the RIP/EIP/IP to point to the next instruction.
5994 *
5995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5996 */
5997IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
5998{
5999 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6000}
6001#endif
6002
6003
6004
6005/**
6006 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6007 *
6008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6009 * @param cbInstr The number of bytes to add.
6010 */
6011IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6012{
6013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6014
6015 pCtx->eflags.Bits.u1RF = 0;
6016
6017 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6018#if ARCH_BITS >= 64
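    /* Branch-free variant: advance RIP and mask it to 16, 32 or 64 bits according to the CPU mode (table indexed by IEMMODE, see the AssertCompile above). */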
6019 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6020 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6021 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6022#else
6023 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6024 pCtx->rip += cbInstr;
6025 else
6026 {
6027 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6028 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6029 }
6030#endif
6031}
6032
6033
6034/**
6035 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6036 *
6037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6038 */
6039IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6040{
6041 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6042}
6043
6044
6045/**
6046 * Adds to the stack pointer.
6047 *
6048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6049 * @param pCtx The CPU context which SP/ESP/RSP should be
6050 * updated.
6051 * @param cbToAdd The number of bytes to add (8-bit!).
6052 */
6053DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6054{
6055 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6056 pCtx->rsp += cbToAdd;
6057 else if (pCtx->ss.Attr.n.u1DefBig)
6058 pCtx->esp += cbToAdd;
6059 else
6060 pCtx->sp += cbToAdd;
6061}
6062
6063
6064/**
6065 * Subtracts from the stack pointer.
6066 *
6067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6068 * @param pCtx The CPU context which SP/ESP/RSP should be
6069 * updated.
6070 * @param cbToSub The number of bytes to subtract (8-bit!).
6071 */
6072DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6073{
6074 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6075 pCtx->rsp -= cbToSub;
6076 else if (pCtx->ss.Attr.n.u1DefBig)
6077 pCtx->esp -= cbToSub;
6078 else
6079 pCtx->sp -= cbToSub;
6080}
6081
6082
6083/**
6084 * Adds to the temporary stack pointer.
6085 *
6086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6087 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6088 * @param cbToAdd The number of bytes to add (16-bit).
6089 * @param pCtx Where to get the current stack mode.
6090 */
6091DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6092{
6093 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6094 pTmpRsp->u += cbToAdd;
6095 else if (pCtx->ss.Attr.n.u1DefBig)
6096 pTmpRsp->DWords.dw0 += cbToAdd;
6097 else
6098 pTmpRsp->Words.w0 += cbToAdd;
6099}
6100
6101
6102/**
6103 * Subtracts from the temporary stack pointer.
6104 *
6105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6106 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6107 * @param cbToSub The number of bytes to subtract.
6108 * @param pCtx Where to get the current stack mode.
6109 * @remarks The @a cbToSub argument *MUST* be 16-bit, as iemCImpl_enter
6110 * expects that.
6111 */
6112DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6113{
6114 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6115 pTmpRsp->u -= cbToSub;
6116 else if (pCtx->ss.Attr.n.u1DefBig)
6117 pTmpRsp->DWords.dw0 -= cbToSub;
6118 else
6119 pTmpRsp->Words.w0 -= cbToSub;
6120}
6121
6122
6123/**
6124 * Calculates the effective stack address for a push of the specified size as
6125 * well as the new RSP value (upper bits may be masked).
6126 *
6127 * @returns Effective stack address for the push.
6128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6129 * @param pCtx Where to get the current stack mode.
6130 * @param cbItem The size of the stack item to push.
6131 * @param puNewRsp Where to return the new RSP value.
6132 */
6133DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6134{
6135 RTUINT64U uTmpRsp;
6136 RTGCPTR GCPtrTop;
6137 uTmpRsp.u = pCtx->rsp;
6138
6139 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6140 GCPtrTop = uTmpRsp.u -= cbItem;
6141 else if (pCtx->ss.Attr.n.u1DefBig)
6142 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6143 else
6144 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6145 *puNewRsp = uTmpRsp.u;
6146 return GCPtrTop;
6147}
6148
6149
6150/**
6151 * Gets the current stack pointer and calculates the value after a pop of the
6152 * specified size.
6153 *
6154 * @returns Current stack pointer.
6155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6156 * @param pCtx Where to get the current stack mode.
6157 * @param cbItem The size of the stack item to pop.
6158 * @param puNewRsp Where to return the new RSP value.
6159 */
6160DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6161{
6162 RTUINT64U uTmpRsp;
6163 RTGCPTR GCPtrTop;
6164 uTmpRsp.u = pCtx->rsp;
6165
6166 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6167 {
6168 GCPtrTop = uTmpRsp.u;
6169 uTmpRsp.u += cbItem;
6170 }
6171 else if (pCtx->ss.Attr.n.u1DefBig)
6172 {
6173 GCPtrTop = uTmpRsp.DWords.dw0;
6174 uTmpRsp.DWords.dw0 += cbItem;
6175 }
6176 else
6177 {
6178 GCPtrTop = uTmpRsp.Words.w0;
6179 uTmpRsp.Words.w0 += cbItem;
6180 }
6181 *puNewRsp = uTmpRsp.u;
6182 return GCPtrTop;
6183}
6184
6185
6186/**
6187 * Calculates the effective stack address for a push of the specified size as
6188 * well as the new temporary RSP value (upper bits may be masked).
6189 *
6190 * @returns Effective stack address for the push.
6191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6192 * @param pCtx Where to get the current stack mode.
6193 * @param pTmpRsp The temporary stack pointer. This is updated.
6194 * @param cbItem The size of the stack item to push.
6195 */
6196DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6197{
6198 RTGCPTR GCPtrTop;
6199
6200 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6201 GCPtrTop = pTmpRsp->u -= cbItem;
6202 else if (pCtx->ss.Attr.n.u1DefBig)
6203 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6204 else
6205 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6206 return GCPtrTop;
6207}
6208
6209
6210/**
6211 * Gets the effective stack address for a pop of the specified size and
6212 * calculates and updates the temporary RSP.
6213 *
6214 * @returns Current stack pointer.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param pCtx Where to get the current stack mode.
6217 * @param pTmpRsp The temporary stack pointer. This is updated.
6218 * @param cbItem The size of the stack item to pop.
6219 */
6220DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6221{
6222 RTGCPTR GCPtrTop;
6223 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6224 {
6225 GCPtrTop = pTmpRsp->u;
6226 pTmpRsp->u += cbItem;
6227 }
6228 else if (pCtx->ss.Attr.n.u1DefBig)
6229 {
6230 GCPtrTop = pTmpRsp->DWords.dw0;
6231 pTmpRsp->DWords.dw0 += cbItem;
6232 }
6233 else
6234 {
6235 GCPtrTop = pTmpRsp->Words.w0;
6236 pTmpRsp->Words.w0 += cbItem;
6237 }
6238 return GCPtrTop;
6239}
6240
6241/** @} */
6242
6243
6244/** @name FPU access and helpers.
6245 *
6246 * @{
6247 */
6248
6249
6250/**
6251 * Hook for preparing to use the host FPU.
6252 *
6253 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6254 *
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 */
6257DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6258{
6259#ifdef IN_RING3
6260 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6261#else
6262 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6263#endif
6264}
6265
6266
6267/**
6268 * Hook for preparing to use the host FPU for SSE instructions.
6269 *
6270 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6271 *
6272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6273 */
6274DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6275{
6276 iemFpuPrepareUsage(pVCpu);
6277}
6278
6279
6280/**
6281 * Hook for actualizing the guest FPU state before the interpreter reads it.
6282 *
6283 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6284 *
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 */
6287DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6288{
6289#ifdef IN_RING3
6290 NOREF(pVCpu);
6291#else
6292 CPUMRZFpuStateActualizeForRead(pVCpu);
6293#endif
6294}
6295
6296
6297/**
6298 * Hook for actualizing the guest FPU state before the interpreter changes it.
6299 *
6300 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6301 *
6302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6303 */
6304DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6305{
6306#ifdef IN_RING3
6307 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6308#else
6309 CPUMRZFpuStateActualizeForChange(pVCpu);
6310#endif
6311}
6312
6313
6314/**
6315 * Hook for actualizing the guest XMM0..15 register state for read only.
6316 *
6317 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6318 *
6319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6320 */
6321DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6322{
6323#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6324 NOREF(pVCpu);
6325#else
6326 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6327#endif
6328}
6329
6330
6331/**
6332 * Hook for actualizing the guest XMM0..15 register state for read+write.
6333 *
6334 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6335 *
6336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6337 */
6338DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6339{
6340#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6341 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6342#else
6343 CPUMRZFpuStateActualizeForChange(pVCpu);
6344#endif
6345}
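/*
 * Usage sketch (illustrative only; the exact worker names vary per
 * instruction): an x87 instruction implementation typically calls one of the
 * hooks above before touching the guest FPU state, e.g.
 *
 *      IEMFPURESULT FpuRes;
 *      iemFpuActualizeStateForChange(pVCpu);      // make the x87 state current
 *      // ... have an iemAImpl_* assembly worker compute FpuRes ...
 *      iemFpuStoreResult(pVCpu, &FpuRes, 0);      // commit FSW/FTW/ST(0)
 *
 * In ring-3 the hooks merely flag the state as dirty (or do nothing for
 * reads); in ring-0 and raw-mode they make sure the guest state can actually
 * be loaded onto the host CPU.
 */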
6346
6347
6348/**
6349 * Stores a QNaN value into a FPU register.
6350 *
6351 * @param pReg Pointer to the register.
6352 */
6353DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6354{
6355 pReg->au32[0] = UINT32_C(0x00000000);
6356 pReg->au32[1] = UINT32_C(0xc0000000);
6357 pReg->au16[4] = UINT16_C(0xffff);
6358}
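/*
 * The pattern stored above is the x87 "real indefinite" QNaN: sign bit set,
 * exponent all ones (0x7fff) and only the top two mantissa bits set, i.e. the
 * 80-bit value ffff:c000000000000000. This matches what the FPU itself
 * produces as the masked invalid-operation response.
 */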
6359
6360
6361/**
6362 * Updates the FOP, FPU.CS and FPUIP registers.
6363 *
6364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6365 * @param pCtx The CPU context.
6366 * @param pFpuCtx The FPU context.
6367 */
6368DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6369{
6370 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6371 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6372 /** @todo x87.CS and FPUIP need to be kept separately. */
6373 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6374 {
6375 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6376 * happens in real mode here based on the fnsave and fnstenv images. */
6377 pFpuCtx->CS = 0;
6378 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6379 }
6380 else
6381 {
6382 pFpuCtx->CS = pCtx->cs.Sel;
6383 pFpuCtx->FPUIP = pCtx->rip;
6384 }
6385}
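/*
 * Illustration of the real/V86 mode case above: FPUIP holds a 20-bit linear
 * address formed from CS and IP, so with CS=0x1234 and EIP=0x0010 the stored
 * value is 0x12340 | 0x0010 = 0x12350 (assuming the fnsave/fnstenv image
 * layout noted in the todo above).
 */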
6386
6387
6388/**
6389 * Updates the x87.DS and FPUDP registers.
6390 *
6391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6392 * @param pCtx The CPU context.
6393 * @param pFpuCtx The FPU context.
6394 * @param iEffSeg The effective segment register.
6395 * @param GCPtrEff The effective address relative to @a iEffSeg.
6396 */
6397DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6398{
6399 RTSEL sel;
6400 switch (iEffSeg)
6401 {
6402 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6403 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6404 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6405 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6406 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6407 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6408 default:
6409 AssertMsgFailed(("%d\n", iEffSeg));
6410 sel = pCtx->ds.Sel;
6411 }
6412 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6413 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6414 {
6415 pFpuCtx->DS = 0;
6416 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6417 }
6418 else
6419 {
6420 pFpuCtx->DS = sel;
6421 pFpuCtx->FPUDP = GCPtrEff;
6422 }
6423}
6424
6425
6426/**
6427 * Rotates the stack registers in the push direction.
6428 *
6429 * @param pFpuCtx The FPU context.
6430 * @remarks This is a complete waste of time, but fxsave stores the registers in
6431 * stack order.
6432 */
6433DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6434{
6435 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6436 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6437 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6438 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6439 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6440 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6441 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6442 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6443 pFpuCtx->aRegs[0].r80 = r80Tmp;
6444}
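/*
 * Rationale for the rotation: the fxsave image keeps the register area in
 * stack order (aRegs[0] is ST(0)), and IEM indexes aRegs[iStReg] directly as
 * ST(iStReg). Rotating the array whenever TOP moves keeps that simple
 * indexing valid, at the cost of the copying lamented in the remark above.
 */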
6445
6446
6447/**
6448 * Rotates the stack registers in the pop direction.
6449 *
6450 * @param pFpuCtx The FPU context.
6451 * @remarks This is a complete waste of time, but fxsave stores the registers in
6452 * stack order.
6453 */
6454DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6455{
6456 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6457 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6458 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6459 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6460 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6461 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6462 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6463 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6464 pFpuCtx->aRegs[7].r80 = r80Tmp;
6465}
6466
6467
6468/**
6469 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6470 * exception prevents it.
6471 *
6472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6473 * @param pResult The FPU operation result to push.
6474 * @param pFpuCtx The FPU context.
6475 */
6476IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6477{
6478 /* Update FSW and bail if there are pending exceptions afterwards. */
6479 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6480 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6481 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6482 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6483 {
6484 pFpuCtx->FSW = fFsw;
6485 return;
6486 }
6487
6488 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6489 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6490 {
6491 /* All is fine, push the actual value. */
6492 pFpuCtx->FTW |= RT_BIT(iNewTop);
6493 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6494 }
6495 else if (pFpuCtx->FCW & X86_FCW_IM)
6496 {
6497 /* Masked stack overflow, push QNaN. */
6498 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6499 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6500 }
6501 else
6502 {
6503 /* Raise stack overflow, don't push anything. */
6504 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6505 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6506 return;
6507 }
6508
6509 fFsw &= ~X86_FSW_TOP_MASK;
6510 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6511 pFpuCtx->FSW = fFsw;
6512
6513 iemFpuRotateStackPush(pFpuCtx);
6514}
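/*
 * TOP arithmetic note: a push decrements TOP modulo 8, which is what the
 * (TOP + 7) & X86_FSW_TOP_SMASK expression above computes, e.g. TOP=0 gives
 * (0 + 7) & 7 = 7 and TOP=3 gives (3 + 7) & 7 = 2.
 */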
6515
6516
6517/**
6518 * Stores a result in a FPU register and updates the FSW and FTW.
6519 *
6520 * @param pFpuCtx The FPU context.
6521 * @param pResult The result to store.
6522 * @param iStReg Which FPU register to store it in.
6523 */
6524IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6525{
6526 Assert(iStReg < 8);
6527 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6528 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6529 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6530 pFpuCtx->FTW |= RT_BIT(iReg);
6531 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6532}
6533
6534
6535/**
6536 * Only updates the FPU status word (FSW) with the result of the current
6537 * instruction.
6538 *
6539 * @param pFpuCtx The FPU context.
6540 * @param u16FSW The FSW output of the current instruction.
6541 */
6542IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6543{
6544 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6545 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6546}
6547
6548
6549/**
6550 * Pops one item off the FPU stack if no pending exception prevents it.
6551 *
6552 * @param pFpuCtx The FPU context.
6553 */
6554IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6555{
6556 /* Check pending exceptions. */
6557 uint16_t uFSW = pFpuCtx->FSW;
6558 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6559 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6560 return;
6561
6562 /* TOP--. */
6563 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6564 uFSW &= ~X86_FSW_TOP_MASK;
6565 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6566 pFpuCtx->FSW = uFSW;
6567
6568 /* Mark the previous ST0 as empty. */
6569 iOldTop >>= X86_FSW_TOP_SHIFT;
6570 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6571
6572 /* Rotate the registers. */
6573 iemFpuRotateStackPop(pFpuCtx);
6574}
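/*
 * TOP arithmetic note: adding 9 (= 8 + 1) in the TOP field and masking is the
 * same as incrementing TOP modulo 8, so a pop moves TOP one slot up, e.g.
 * TOP=7 wraps around to 0.
 */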
6575
6576
6577/**
6578 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6579 *
6580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6581 * @param pResult The FPU operation result to push.
6582 */
6583IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6584{
6585 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6586 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6587 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6588 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6589}
6590
6591
6592/**
6593 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6594 * and sets FPUDP and FPUDS.
6595 *
6596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6597 * @param pResult The FPU operation result to push.
6598 * @param iEffSeg The effective segment register.
6599 * @param GCPtrEff The effective address relative to @a iEffSeg.
6600 */
6601IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6602{
6603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6604 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6605 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6607 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6608}
6609
6610
6611/**
6612 * Replace ST0 with the first value and push the second onto the FPU stack,
6613 * unless a pending exception prevents it.
6614 *
6615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6616 * @param pResult The FPU operation result to store and push.
6617 */
6618IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6619{
6620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6621 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6622 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6623
6624 /* Update FSW and bail if there are pending exceptions afterwards. */
6625 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6626 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6627 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6628 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6629 {
6630 pFpuCtx->FSW = fFsw;
6631 return;
6632 }
6633
6634 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6635 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6636 {
6637 /* All is fine, push the actual value. */
6638 pFpuCtx->FTW |= RT_BIT(iNewTop);
6639 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6640 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6641 }
6642 else if (pFpuCtx->FCW & X86_FCW_IM)
6643 {
6644 /* Masked stack overflow, push QNaN. */
6645 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6646 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6647 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6648 }
6649 else
6650 {
6651 /* Raise stack overflow, don't push anything. */
6652 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6653 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6654 return;
6655 }
6656
6657 fFsw &= ~X86_FSW_TOP_MASK;
6658 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6659 pFpuCtx->FSW = fFsw;
6660
6661 iemFpuRotateStackPush(pFpuCtx);
6662}
6663
6664
6665/**
6666 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6667 * FOP.
6668 *
6669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6670 * @param pResult The result to store.
6671 * @param iStReg Which FPU register to store it in.
6672 */
6673IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6674{
6675 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6676 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6677 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6678 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6679}
6680
6681
6682/**
6683 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6684 * FOP, and then pops the stack.
6685 *
6686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6687 * @param pResult The result to store.
6688 * @param iStReg Which FPU register to store it in.
6689 */
6690IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6691{
6692 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6693 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6694 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6695 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6696 iemFpuMaybePopOne(pFpuCtx);
6697}
6698
6699
6700/**
6701 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6702 * FPUDP, and FPUDS.
6703 *
6704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6705 * @param pResult The result to store.
6706 * @param iStReg Which FPU register to store it in.
6707 * @param iEffSeg The effective memory operand selector register.
6708 * @param GCPtrEff The effective memory operand offset.
6709 */
6710IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6711 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6712{
6713 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6714 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6715 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6716 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6717 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6718}
6719
6720
6721/**
6722 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6723 * FPUDP, and FPUDS, and then pops the stack.
6724 *
6725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6726 * @param pResult The result to store.
6727 * @param iStReg Which FPU register to store it in.
6728 * @param iEffSeg The effective memory operand selector register.
6729 * @param GCPtrEff The effective memory operand offset.
6730 */
6731IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6732 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6733{
6734 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6735 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6736 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6737 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6738 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6739 iemFpuMaybePopOne(pFpuCtx);
6740}
6741
6742
6743/**
6744 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6745 *
6746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6747 */
6748IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6749{
6750 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6751 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6752 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6753}
6754
6755
6756/**
6757 * Marks the specified stack register as free (for FFREE).
6758 *
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 * @param iStReg The register to free.
6761 */
6762IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6763{
6764 Assert(iStReg < 8);
6765 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6766 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6767 pFpuCtx->FTW &= ~RT_BIT(iReg);
6768}
6769
6770
6771/**
6772 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6773 *
6774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6775 */
6776IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6777{
6778 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6779 uint16_t uFsw = pFpuCtx->FSW;
6780 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6781 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6782 uFsw &= ~X86_FSW_TOP_MASK;
6783 uFsw |= uTop;
6784 pFpuCtx->FSW = uFsw;
6785}
6786
6787
6788/**
6789 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6790 *
6791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6792 */
6793IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6794{
6795 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6796 uint16_t uFsw = pFpuCtx->FSW;
6797 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6798 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6799 uFsw &= ~X86_FSW_TOP_MASK;
6800 uFsw |= uTop;
6801 pFpuCtx->FSW = uFsw;
6802}
6803
6804
6805/**
6806 * Updates the FSW, FOP, FPUIP, and FPUCS.
6807 *
6808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6809 * @param u16FSW The FSW from the current instruction.
6810 */
6811IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6812{
6813 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6814 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6815 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6816 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6817}
6818
6819
6820/**
6821 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6822 *
6823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6824 * @param u16FSW The FSW from the current instruction.
6825 */
6826IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6827{
6828 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6829 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6830 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6831 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6832 iemFpuMaybePopOne(pFpuCtx);
6833}
6834
6835
6836/**
6837 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6838 *
6839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6840 * @param u16FSW The FSW from the current instruction.
6841 * @param iEffSeg The effective memory operand selector register.
6842 * @param GCPtrEff The effective memory operand offset.
6843 */
6844IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6845{
6846 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6847 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6848 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6849 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6850 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6851}
6852
6853
6854/**
6855 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6856 *
6857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6858 * @param u16FSW The FSW from the current instruction.
6859 */
6860IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
6861{
6862 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6863 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6864 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6865 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6866 iemFpuMaybePopOne(pFpuCtx);
6867 iemFpuMaybePopOne(pFpuCtx);
6868}
6869
6870
6871/**
6872 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
6873 *
6874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6875 * @param u16FSW The FSW from the current instruction.
6876 * @param iEffSeg The effective memory operand selector register.
6877 * @param GCPtrEff The effective memory operand offset.
6878 */
6879IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6880{
6881 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6882 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6883 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6884 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6885 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6886 iemFpuMaybePopOne(pFpuCtx);
6887}
6888
6889
6890/**
6891 * Worker routine for raising an FPU stack underflow exception.
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 * @param pFpuCtx The FPU context.
6895 * @param iStReg The stack register being accessed.
6896 */
6897IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
6898{
6899 Assert(iStReg < 8 || iStReg == UINT8_MAX);
6900 if (pFpuCtx->FCW & X86_FCW_IM)
6901 {
6902 /* Masked underflow. */
6903 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6904 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6905 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6906 if (iStReg != UINT8_MAX)
6907 {
6908 pFpuCtx->FTW |= RT_BIT(iReg);
6909 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6910 }
6911 }
6912 else
6913 {
6914 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6915 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6916 }
6917}
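/*
 * Behaviour summary: with FCW.IM set the masked response is produced right
 * here (QNaN into the target register, IE+SF set, C1 left clear to indicate
 * underflow); with FCW.IM clear only IE+SF+ES+B are latched, leaving the
 * exception pending for the next FPU instruction that checks for it.
 */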
6918
6919
6920/**
6921 * Raises a FPU stack underflow exception.
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 * @param iStReg The destination register that should be loaded
6925 * with QNaN if \#IS is masked. Specify
6926 * UINT8_MAX if none (like for fcom).
6927 */
6928DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
6929{
6930 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6931 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6932 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6933 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6934}
6935
6936
6937DECL_NO_INLINE(IEM_STATIC, void)
6938iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6939{
6940 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6941 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6942 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6943 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6944 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6945}
6946
6947
6948DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
6949{
6950 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6951 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6952 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6953 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6954 iemFpuMaybePopOne(pFpuCtx);
6955}
6956
6957
6958DECL_NO_INLINE(IEM_STATIC, void)
6959iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6960{
6961 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6962 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6963 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6964 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6965 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6966 iemFpuMaybePopOne(pFpuCtx);
6967}
6968
6969
6970DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
6971{
6972 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6973 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6974 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6975 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
6976 iemFpuMaybePopOne(pFpuCtx);
6977 iemFpuMaybePopOne(pFpuCtx);
6978}
6979
6980
6981DECL_NO_INLINE(IEM_STATIC, void)
6982iemFpuStackPushUnderflow(PVMCPU pVCpu)
6983{
6984 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6985 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6986 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6987
6988 if (pFpuCtx->FCW & X86_FCW_IM)
6989 {
6990 /* Masked underflow - push QNaN. */
6991 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6992 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6993 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6994 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6995 pFpuCtx->FTW |= RT_BIT(iNewTop);
6996 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6997 iemFpuRotateStackPush(pFpuCtx);
6998 }
6999 else
7000 {
7001 /* Exception pending - don't change TOP or the register stack. */
7002 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7003 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7004 }
7005}
7006
7007
7008DECL_NO_INLINE(IEM_STATIC, void)
7009iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7010{
7011 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7012 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7013 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7014
7015 if (pFpuCtx->FCW & X86_FCW_IM)
7016 {
7017 /* Masked underflow - push QNaN. */
7018 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7019 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7020 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7021 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7022 pFpuCtx->FTW |= RT_BIT(iNewTop);
7023 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7024 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7025 iemFpuRotateStackPush(pFpuCtx);
7026 }
7027 else
7028 {
7029 /* Exception pending - don't change TOP or the register stack. */
7030 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7031 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7032 }
7033}
7034
7035
7036/**
7037 * Worker routine for raising an FPU stack overflow exception on a push.
7038 *
7039 * @param pFpuCtx The FPU context.
7040 */
7041IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7042{
7043 if (pFpuCtx->FCW & X86_FCW_IM)
7044 {
7045 /* Masked overflow. */
7046 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7047 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7048 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7049 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7050 pFpuCtx->FTW |= RT_BIT(iNewTop);
7051 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7052 iemFpuRotateStackPush(pFpuCtx);
7053 }
7054 else
7055 {
7056 /* Exception pending - don't change TOP or the register stack. */
7057 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7058 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7059 }
7060}
7061
7062
7063/**
7064 * Raises a FPU stack overflow exception on a push.
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 */
7068DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7069{
7070 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7071 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7072 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7073 iemFpuStackPushOverflowOnly(pFpuCtx);
7074}
7075
7076
7077/**
7078 * Raises a FPU stack overflow exception on a push with a memory operand.
7079 *
7080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7081 * @param iEffSeg The effective memory operand selector register.
7082 * @param GCPtrEff The effective memory operand offset.
7083 */
7084DECL_NO_INLINE(IEM_STATIC, void)
7085iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7086{
7087 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7088 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7089 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7090 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7091 iemFpuStackPushOverflowOnly(pFpuCtx);
7092}
7093
7094
7095IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7096{
7097 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7098 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7099 if (pFpuCtx->FTW & RT_BIT(iReg))
7100 return VINF_SUCCESS;
7101 return VERR_NOT_FOUND;
7102}
7103
7104
7105IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7106{
7107 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7108 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7109 if (pFpuCtx->FTW & RT_BIT(iReg))
7110 {
7111 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7112 return VINF_SUCCESS;
7113 }
7114 return VERR_NOT_FOUND;
7115}
7116
7117
7118IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7119 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7120{
7121 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7122 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7123 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7124 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7125 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7126 {
7127 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7128 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7129 return VINF_SUCCESS;
7130 }
7131 return VERR_NOT_FOUND;
7132}
7133
7134
7135IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7136{
7137 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7138 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7139 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7140 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7141 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7142 {
7143 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7144 return VINF_SUCCESS;
7145 }
7146 return VERR_NOT_FOUND;
7147}
7148
7149
7150/**
7151 * Updates the FPU exception status after FCW is changed.
7152 *
7153 * @param pFpuCtx The FPU context.
7154 */
7155IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7156{
7157 uint16_t u16Fsw = pFpuCtx->FSW;
7158 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7159 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7160 else
7161 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7162 pFpuCtx->FSW = u16Fsw;
7163}
7164
7165
7166/**
7167 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7168 *
7169 * @returns The full FTW.
7170 * @param pFpuCtx The FPU context.
7171 */
7172IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7173{
7174 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7175 uint16_t u16Ftw = 0;
7176 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7177 for (unsigned iSt = 0; iSt < 8; iSt++)
7178 {
7179 unsigned const iReg = (iSt + iTop) & 7;
7180 if (!(u8Ftw & RT_BIT(iReg)))
7181 u16Ftw |= 3 << (iReg * 2); /* empty */
7182 else
7183 {
7184 uint16_t uTag;
7185 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7186 if (pr80Reg->s.uExponent == 0x7fff)
7187 uTag = 2; /* Exponent is all 1's => Special. */
7188 else if (pr80Reg->s.uExponent == 0x0000)
7189 {
7190 if (pr80Reg->s.u64Mantissa == 0x0000)
7191 uTag = 1; /* All bits are zero => Zero. */
7192 else
7193 uTag = 2; /* Must be special. */
7194 }
7195 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7196 uTag = 0; /* Valid. */
7197 else
7198 uTag = 2; /* Must be special. */
7199
7200 u16Ftw |= uTag << (iReg * 2);
7201 }
7202 }
7203
7204 return u16Ftw;
7205}
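/*
 * Full tag word encoding used above (two bits per register, in physical
 * register order):
 *      00 - valid      (normalized value, J bit set)
 *      01 - zero       (exponent and mantissa all zero)
 *      10 - special    (NaN, infinity, denormal or unnormal)
 *      11 - empty
 */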
7206
7207
7208/**
7209 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7210 *
7211 * @returns The compressed FTW.
7212 * @param u16FullFtw The full FTW to convert.
7213 */
7214IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7215{
7216 uint8_t u8Ftw = 0;
7217 for (unsigned i = 0; i < 8; i++)
7218 {
7219 if ((u16FullFtw & 3) != 3 /*empty*/)
7220 u8Ftw |= RT_BIT(i);
7221 u16FullFtw >>= 2;
7222 }
7223
7224 return u8Ftw;
7225}
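/*
 * Example: a full tag word of 0xffff (all registers empty) compresses to
 * 0x00, while 0xfffc (register 0 valid, the rest empty) compresses to 0x01;
 * the compressed form only records empty vs. not empty.
 */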
7226
7227/** @} */
7228
7229
7230/** @name Memory access.
7231 *
7232 * @{
7233 */
7234
7235
7236/**
7237 * Updates the IEMCPU::cbWritten counter if applicable.
7238 *
7239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7240 * @param fAccess The access being accounted for.
7241 * @param cbMem The access size.
7242 */
7243DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7244{
7245 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7246 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7247 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7248}
7249
7250
7251/**
7252 * Checks if the given segment can be written to, raising the appropriate
7253 * exception if not.
7254 *
7255 * @returns VBox strict status code.
7256 *
7257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7258 * @param pHid Pointer to the hidden register.
7259 * @param iSegReg The register number.
7260 * @param pu64BaseAddr Where to return the base address to use for the
7261 * segment. (In 64-bit code it may differ from the
7262 * base in the hidden segment.)
7263 */
7264IEM_STATIC VBOXSTRICTRC
7265iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7266{
7267 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7268 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7269 else
7270 {
7271 if (!pHid->Attr.n.u1Present)
7272 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7273
7274 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7275 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7276 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7277 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7278 *pu64BaseAddr = pHid->u64Base;
7279 }
7280 return VINF_SUCCESS;
7281}
7282
7283
7284/**
7285 * Checks if the given segment can be read from, raising the appropriate
7286 * exception if not.
7287 *
7288 * @returns VBox strict status code.
7289 *
7290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7291 * @param pHid Pointer to the hidden register.
7292 * @param iSegReg The register number.
7293 * @param pu64BaseAddr Where to return the base address to use for the
7294 * segment. (In 64-bit code it may differ from the
7295 * base in the hidden segment.)
7296 */
7297IEM_STATIC VBOXSTRICTRC
7298iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7299{
7300 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7301 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7302 else
7303 {
7304 if (!pHid->Attr.n.u1Present)
7305 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7306
7307 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7308 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7309 *pu64BaseAddr = pHid->u64Base;
7310 }
7311 return VINF_SUCCESS;
7312}
7313
7314
7315/**
7316 * Applies the segment limit, base and attributes.
7317 *
7318 * This may raise a \#GP or \#SS.
7319 *
7320 * @returns VBox strict status code.
7321 *
7322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7323 * @param fAccess The kind of access which is being performed.
7324 * @param iSegReg The index of the segment register to apply.
7325 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7326 * TSS, ++).
7327 * @param cbMem The access size.
7328 * @param pGCPtrMem Pointer to the guest memory address to apply
7329 * segmentation to. Input and output parameter.
7330 */
7331IEM_STATIC VBOXSTRICTRC
7332iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7333{
7334 if (iSegReg == UINT8_MAX)
7335 return VINF_SUCCESS;
7336
7337 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7338 switch (pVCpu->iem.s.enmCpuMode)
7339 {
7340 case IEMMODE_16BIT:
7341 case IEMMODE_32BIT:
7342 {
7343 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7344 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7345
7346 if ( pSel->Attr.n.u1Present
7347 && !pSel->Attr.n.u1Unusable)
7348 {
7349 Assert(pSel->Attr.n.u1DescType);
7350 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7351 {
7352 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7353 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7354 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7355
7356 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7357 {
7358 /** @todo CPL check. */
7359 }
7360
7361 /*
7362 * There are two kinds of data selectors, normal and expand down.
7363 */
7364 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7365 {
7366 if ( GCPtrFirst32 > pSel->u32Limit
7367 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7368 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7369 }
7370 else
7371 {
7372 /*
7373 * The upper boundary is defined by the B bit, not the G bit!
7374 */
7375 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7376 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7377 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7378 }
7379 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7380 }
7381 else
7382 {
7383
7384 /*
7385 * Code selectors can usually be read through; writing is
7386 * only permitted in real and V8086 mode.
7387 */
7388 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7389 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7390 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7391 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7392 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7393
7394 if ( GCPtrFirst32 > pSel->u32Limit
7395 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7396 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7397
7398 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7399 {
7400 /** @todo CPL check. */
7401 }
7402
7403 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7404 }
7405 }
7406 else
7407 return iemRaiseGeneralProtectionFault0(pVCpu);
7408 return VINF_SUCCESS;
7409 }
7410
7411 case IEMMODE_64BIT:
7412 {
7413 RTGCPTR GCPtrMem = *pGCPtrMem;
7414 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7415 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7416
7417 Assert(cbMem >= 1);
7418 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7419 return VINF_SUCCESS;
7420 return iemRaiseGeneralProtectionFault0(pVCpu);
7421 }
7422
7423 default:
7424 AssertFailedReturn(VERR_IEM_IPE_7);
7425 }
7426}
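/*
 * Expand-down example for the check above: with a 16-bit data segment whose
 * limit is 0x0fff and B=0, the valid offset range is 0x1000..0xffff, so an
 * access at 0x0800 raises the bounds exception while one at 0x2000 is
 * accepted (provided it does not run past 0xffff).
 */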
7427
7428
7429/**
7430 * Translates a virtual address to a physical address and checks if we
7431 * can access the page as specified.
7432 *
7433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7434 * @param GCPtrMem The virtual address.
7435 * @param fAccess The intended access.
7436 * @param pGCPhysMem Where to return the physical address.
7437 */
7438IEM_STATIC VBOXSTRICTRC
7439iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7440{
7441 /** @todo Need a different PGM interface here. We're currently using
7442 * generic / REM interfaces. This won't cut it for R0 & RC. */
7443 RTGCPHYS GCPhys;
7444 uint64_t fFlags;
7445 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7446 if (RT_FAILURE(rc))
7447 {
7448 /** @todo Check unassigned memory in unpaged mode. */
7449 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7450 *pGCPhysMem = NIL_RTGCPHYS;
7451 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7452 }
7453
7454 /* If the page is writable and does not have the no-exec bit set, all
7455 access is allowed. Otherwise we'll have to check more carefully... */
7456 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7457 {
7458 /* Write to read only memory? */
7459 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7460 && !(fFlags & X86_PTE_RW)
7461 && ( pVCpu->iem.s.uCpl != 0
7462 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7463 {
7464 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7465 *pGCPhysMem = NIL_RTGCPHYS;
7466 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7467 }
7468
7469 /* Kernel memory accessed by userland? */
7470 if ( !(fFlags & X86_PTE_US)
7471 && pVCpu->iem.s.uCpl == 3
7472 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7473 {
7474 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7475 *pGCPhysMem = NIL_RTGCPHYS;
7476 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7477 }
7478
7479 /* Executing non-executable memory? */
7480 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7481 && (fFlags & X86_PTE_PAE_NX)
7482 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7483 {
7484 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7485 *pGCPhysMem = NIL_RTGCPHYS;
7486 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7487 VERR_ACCESS_DENIED);
7488 }
7489 }
7490
7491 /*
7492 * Set the dirty / access flags.
7493 * ASSUMES this is set when the address is translated rather than on commit...
7494 */
7495 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7496 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7497 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7498 {
7499 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7500 AssertRC(rc2);
7501 }
7502
7503 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7504 *pGCPhysMem = GCPhys;
7505 return VINF_SUCCESS;
7506}
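/*
 * Fast path note: the detailed checks above are skipped whenever the PTE has
 * RW and US set and NX clear, since such a page is accessible to any CPL for
 * any access type; only pages lacking one of those bits need the per-case
 * write/user/execute checks.
 */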
7507
7508
7509
7510/**
7511 * Maps a physical page.
7512 *
7513 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7515 * @param GCPhysMem The physical address.
7516 * @param fAccess The intended access.
7517 * @param ppvMem Where to return the mapping address.
7518 * @param pLock The PGM lock.
7519 */
7520IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7521{
7522#ifdef IEM_VERIFICATION_MODE_FULL
7523 /* Force the alternative path so we can ignore writes. */
7524 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7525 {
7526 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7527 {
7528 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7529 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7530 if (RT_FAILURE(rc2))
7531 pVCpu->iem.s.fProblematicMemory = true;
7532 }
7533 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7534 }
7535#endif
7536#ifdef IEM_LOG_MEMORY_WRITES
7537 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7538 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7539#endif
7540#ifdef IEM_VERIFICATION_MODE_MINIMAL
7541 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7542#endif
7543
7544 /** @todo This API may require some improving later. A private deal with PGM
7545 * regarding locking and unlocking needs to be struck. A couple of TLBs
7546 * living in PGM, but with publicly accessible inlined access methods
7547 * could perhaps be an even better solution. */
7548 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7549 GCPhysMem,
7550 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7551 pVCpu->iem.s.fBypassHandlers,
7552 ppvMem,
7553 pLock);
7554 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7555 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7556
7557#ifdef IEM_VERIFICATION_MODE_FULL
7558 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7559 pVCpu->iem.s.fProblematicMemory = true;
7560#endif
7561 return rc;
7562}
7563
7564
7565/**
7566 * Unmap a page previously mapped by iemMemPageMap.
7567 *
7568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7569 * @param GCPhysMem The physical address.
7570 * @param fAccess The intended access.
7571 * @param pvMem What iemMemPageMap returned.
7572 * @param pLock The PGM lock.
7573 */
7574DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7575{
7576 NOREF(pVCpu);
7577 NOREF(GCPhysMem);
7578 NOREF(fAccess);
7579 NOREF(pvMem);
7580 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7581}
7582
7583
7584/**
7585 * Looks up a memory mapping entry.
7586 *
7587 * @returns The mapping index (0, 1 or 2) or VERR_NOT_FOUND (negative).
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param pvMem The memory address.
7590 * @param fAccess The access to match (type and what bits).
7591 */
7592DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7593{
7594 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7595 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7596 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7597 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7598 return 0;
7599 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7600 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7601 return 1;
7602 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7603 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7604 return 2;
7605 return VERR_NOT_FOUND;
7606}
7607
7608
7609/**
7610 * Finds a free memmap entry when using iNextMapping doesn't work.
7611 *
7612 * @returns Memory mapping index, 1024 on failure.
7613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7614 */
7615IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7616{
7617 /*
7618 * The easy case.
7619 */
7620 if (pVCpu->iem.s.cActiveMappings == 0)
7621 {
7622 pVCpu->iem.s.iNextMapping = 1;
7623 return 0;
7624 }
7625
7626 /* There should be enough mappings for all instructions. */
7627 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7628
7629 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7630 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7631 return i;
7632
7633 AssertFailedReturn(1024);
7634}
7635
7636
7637/**
7638 * Commits a bounce buffer that needs writing back and unmaps it.
7639 *
7640 * @returns Strict VBox status code.
7641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7642 * @param iMemMap The index of the buffer to commit.
7643 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
7644 * Always false in ring-3, obviously.
7645 */
7646IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7647{
7648 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7649 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7650#ifdef IN_RING3
7651 Assert(!fPostponeFail);
7652#endif
7653
7654 /*
7655 * Do the writing.
7656 */
7657#ifndef IEM_VERIFICATION_MODE_MINIMAL
7658 PVM pVM = pVCpu->CTX_SUFF(pVM);
7659 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7660 && !IEM_VERIFICATION_ENABLED(pVCpu))
7661 {
7662 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7663 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7664 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7665 if (!pVCpu->iem.s.fBypassHandlers)
7666 {
7667 /*
7668 * Carefully and efficiently dealing with access handler return
7669 * codes makes this a little bloated.
7670 */
7671 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7673 pbBuf,
7674 cbFirst,
7675 PGMACCESSORIGIN_IEM);
7676 if (rcStrict == VINF_SUCCESS)
7677 {
7678 if (cbSecond)
7679 {
7680 rcStrict = PGMPhysWrite(pVM,
7681 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7682 pbBuf + cbFirst,
7683 cbSecond,
7684 PGMACCESSORIGIN_IEM);
7685 if (rcStrict == VINF_SUCCESS)
7686 { /* nothing */ }
7687 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7688 {
7689 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7690 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7691 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7692 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7693 }
7694# ifndef IN_RING3
7695 else if (fPostponeFail)
7696 {
7697 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7699 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7700 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7701 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7702 return iemSetPassUpStatus(pVCpu, rcStrict);
7703 }
7704# endif
7705 else
7706 {
7707 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7708 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7709 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7710 return rcStrict;
7711 }
7712 }
7713 }
7714 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7715 {
7716 if (!cbSecond)
7717 {
7718 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7719 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7720 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7721 }
7722 else
7723 {
7724 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7725 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7726 pbBuf + cbFirst,
7727 cbSecond,
7728 PGMACCESSORIGIN_IEM);
7729 if (rcStrict2 == VINF_SUCCESS)
7730 {
7731 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7732 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7734 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7735 }
7736 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7737 {
7738 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7740 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7741 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7742 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7743 }
7744# ifndef IN_RING3
7745 else if (fPostponeFail)
7746 {
7747 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7748 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7749 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7750 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7751 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7752 return iemSetPassUpStatus(pVCpu, rcStrict);
7753 }
7754# endif
7755 else
7756 {
7757 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7759 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7760 return rcStrict2;
7761 }
7762 }
7763 }
7764# ifndef IN_RING3
7765 else if (fPostponeFail)
7766 {
7767 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7768 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7769 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7770 if (!cbSecond)
7771 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7772 else
7773 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7774 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7775 return iemSetPassUpStatus(pVCpu, rcStrict);
7776 }
7777# endif
7778 else
7779 {
7780 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7781 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7782 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7783 return rcStrict;
7784 }
7785 }
7786 else
7787 {
7788 /*
7789 * No access handlers, much simpler.
7790 */
7791 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7792 if (RT_SUCCESS(rc))
7793 {
7794 if (cbSecond)
7795 {
7796 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7797 if (RT_SUCCESS(rc))
7798 { /* likely */ }
7799 else
7800 {
7801 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7802 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7803 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7804 return rc;
7805 }
7806 }
7807 }
7808 else
7809 {
7810 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7812 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7813 return rc;
7814 }
7815 }
7816 }
7817#endif
7818
7819#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7820 /*
7821 * Record the write(s).
7822 */
7823 if (!pVCpu->iem.s.fNoRem)
7824 {
7825 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7826 if (pEvtRec)
7827 {
7828 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7829 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7830 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7831 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7832 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7833 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7834 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7835 }
7836 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7837 {
7838 pEvtRec = iemVerifyAllocRecord(pVCpu);
7839 if (pEvtRec)
7840 {
7841 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7842 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7843 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7844 memcpy(pEvtRec->u.RamWrite.ab,
7845 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7846 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7847 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7848 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7849 }
7850 }
7851 }
7852#endif
7853#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7854 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7855 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7856 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7857 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7858 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7859 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7860
7861 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7862 g_cbIemWrote = cbWrote;
7863 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
7864#endif
7865
7866 /*
7867 * Free the mapping entry.
7868 */
7869 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7870 Assert(pVCpu->iem.s.cActiveMappings != 0);
7871 pVCpu->iem.s.cActiveMappings--;
7872 return VINF_SUCCESS;
7873}
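/*
 * Note on the postponement path above: when fPostponeFail is set (ring-0/raw
 * mode only) a failed PGMPhysWrite is not reported to the caller; instead the
 * not-yet-committed part is flagged with IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND
 * and VMCPU_FF_IEM is raised, so the remainder of the write can be dealt with
 * once we get to ring-3.
 */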
7874
7875
7876/**
7877 * iemMemMap worker that deals with a request crossing pages.
7878 */
7879IEM_STATIC VBOXSTRICTRC
7880iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
7881{
7882 /*
7883 * Do the address translations.
7884 */
7885 RTGCPHYS GCPhysFirst;
7886 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
7887 if (rcStrict != VINF_SUCCESS)
7888 return rcStrict;
7889
7890 RTGCPHYS GCPhysSecond;
7891 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
7892 fAccess, &GCPhysSecond);
7893 if (rcStrict != VINF_SUCCESS)
7894 return rcStrict;
7895 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
7896
7897 PVM pVM = pVCpu->CTX_SUFF(pVM);
7898#ifdef IEM_VERIFICATION_MODE_FULL
7899 /*
7900 * Detect problematic memory when verifying so we can select
7901 * the right execution engine. (TLB: Redo this.)
7902 */
7903 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7904 {
7905 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7906 if (RT_SUCCESS(rc2))
7907 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7908 if (RT_FAILURE(rc2))
7909 pVCpu->iem.s.fProblematicMemory = true;
7910 }
7911#endif
7912
7913
7914 /*
7915 * Read in the current memory content if it's a read, execute or partial
7916 * write access.
7917 */
7918 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7919 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
7920 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
7921
7922 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7923 {
7924 if (!pVCpu->iem.s.fBypassHandlers)
7925 {
7926 /*
7927 * Must carefully deal with access handler status codes here,
7928 * which makes the code a bit bloated.
7929 */
7930 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
7931 if (rcStrict == VINF_SUCCESS)
7932 {
7933 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7934 if (rcStrict == VINF_SUCCESS)
7935 { /*likely */ }
7936 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7937 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7938 else
7939 {
7940 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
7941 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7942 return rcStrict;
7943 }
7944 }
7945 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7946 {
7947 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7948 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7949 {
7950 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7951 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7952 }
7953 else
7954 {
7955 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
7956 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
7957 return rcStrict2;
7958 }
7959 }
7960 else
7961 {
7962 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7963 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7964 return rcStrict;
7965 }
7966 }
7967 else
7968 {
7969 /*
7970 * No informational status codes here, much more straightforward.
7971 */
7972 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
7973 if (RT_SUCCESS(rc))
7974 {
7975 Assert(rc == VINF_SUCCESS);
7976 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
7977 if (RT_SUCCESS(rc))
7978 Assert(rc == VINF_SUCCESS);
7979 else
7980 {
7981 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
7982 return rc;
7983 }
7984 }
7985 else
7986 {
7987 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
7988 return rc;
7989 }
7990 }
7991
7992#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7993 if ( !pVCpu->iem.s.fNoRem
7994 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
7995 {
7996 /*
7997 * Record the reads.
7998 */
7999 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8000 if (pEvtRec)
8001 {
8002 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8003 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8004 pEvtRec->u.RamRead.cb = cbFirstPage;
8005 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8006 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8007 }
8008 pEvtRec = iemVerifyAllocRecord(pVCpu);
8009 if (pEvtRec)
8010 {
8011 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8012 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8013 pEvtRec->u.RamRead.cb = cbSecondPage;
8014 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8015 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8016 }
8017 }
8018#endif
8019 }
8020#ifdef VBOX_STRICT
8021 else
8022 memset(pbBuf, 0xcc, cbMem);
8023 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8024 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8025#endif
8026
8027 /*
8028 * Commit the bounce buffer entry.
8029 */
8030 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8031 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8032 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8033 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8034 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8035 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8036 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8037 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8038 pVCpu->iem.s.cActiveMappings++;
8039
8040 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8041 *ppvMem = pbBuf;
8042 return VINF_SUCCESS;
8043}
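
/* Illustrative sketch (not part of the build): how the page split computed above works
   out numerically, assuming the usual 4 KiB x86 page size.  For a 4 byte access whose
   first byte sits at page offset 0xffe:

       cbFirstPage  = PAGE_SIZE - 0xffe   = 2  -> taken from the end of the first page
       cbSecondPage = cbMem - cbFirstPage = 2  -> taken from the start of the second page

   Both halves land back to back in aBounceBuffers[iMemMap].ab[], which is the buffer
   the caller gets back in *ppvMem. */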
8044
8045
8046/**
8047 * iemMemMap worker that deals with iemMemPageMap failures.
8048 */
8049IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8050 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8051{
8052 /*
8053 * Filter out conditions we can handle and the ones which shouldn't happen.
8054 */
8055 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8056 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8057 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8058 {
8059 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8060 return rcMap;
8061 }
8062 pVCpu->iem.s.cPotentialExits++;
8063
8064 /*
8065 * Read in the current memory content if it's a read, execute or partial
8066 * write access.
8067 */
8068 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8069 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8070 {
8071 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8072 memset(pbBuf, 0xff, cbMem);
8073 else
8074 {
8075 int rc;
8076 if (!pVCpu->iem.s.fBypassHandlers)
8077 {
8078 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8079 if (rcStrict == VINF_SUCCESS)
8080 { /* nothing */ }
8081 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8083 else
8084 {
8085 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8086 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8087 return rcStrict;
8088 }
8089 }
8090 else
8091 {
8092 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8093 if (RT_SUCCESS(rc))
8094 { /* likely */ }
8095 else
8096 {
8097 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8098 GCPhysFirst, rc));
8099 return rc;
8100 }
8101 }
8102 }
8103
8104#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8105 if ( !pVCpu->iem.s.fNoRem
8106 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8107 {
8108 /*
8109 * Record the read.
8110 */
8111 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8112 if (pEvtRec)
8113 {
8114 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8115 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8116 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8117 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8118 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8119 }
8120 }
8121#endif
8122 }
8123#ifdef VBOX_STRICT
8124 else
8125 memset(pbBuf, 0xcc, cbMem);
8126#endif
8127#ifdef VBOX_STRICT
8128 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8129 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8130#endif
8131
8132 /*
8133 * Commit the bounce buffer entry.
8134 */
8135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8136 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8137 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8138 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8139 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8140 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8141 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8142 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8143 pVCpu->iem.s.cActiveMappings++;
8144
8145 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8146 *ppvMem = pbBuf;
8147 return VINF_SUCCESS;
8148}
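
/* Illustrative sketch (not part of the build): the three iemMemPageMap failures the
   helper above turns into bounce buffering, and what a read then sees:

       // VERR_PGM_PHYS_TLB_CATCH_WRITE -> writes must go thru PGM (handler / monitored page)
       // VERR_PGM_PHYS_TLB_CATCH_ALL   -> all access must go thru PGM handlers
       // VERR_PGM_PHYS_TLB_UNASSIGNED  -> nothing is mapped there; reads return all 0xff bytes

   Any other failure status is passed up as-is, and non-failure codes trip VERR_IEM_IPE_8. */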
8149
8150
8151
8152/**
8153 * Maps the specified guest memory for the given kind of access.
8154 *
8155 * This may be using bounce buffering of the memory if it's crossing a page
8156 * boundary or if there is an access handler installed for any of it. Because
8157 * of lock prefix guarantees, we're in for some extra clutter when this
8158 * happens.
8159 *
8160 * This may raise a \#GP, \#SS, \#PF or \#AC.
8161 *
8162 * @returns VBox strict status code.
8163 *
8164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8165 * @param ppvMem Where to return the pointer to the mapped
8166 * memory.
8167 * @param cbMem The number of bytes to map. This is usually 1,
8168 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8169 * string operations it can be up to a page.
8170 * @param iSegReg The index of the segment register to use for
8171 * this access. The base and limits are checked.
8172 * Use UINT8_MAX to indicate that no segmentation
8173 * is required (for IDT, GDT and LDT accesses).
8174 * @param GCPtrMem The address of the guest memory.
8175 * @param fAccess How the memory is being accessed. The
8176 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8177 * how to map the memory, while the
8178 * IEM_ACCESS_WHAT_XXX bit is used when raising
8179 * exceptions.
8180 */
8181IEM_STATIC VBOXSTRICTRC
8182iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8183{
8184 /*
8185 * Check the input and figure out which mapping entry to use.
8186 */
8187 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8188 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8189 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8190
8191 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8192 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8193 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8194 {
8195 iMemMap = iemMemMapFindFree(pVCpu);
8196 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8197 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8198 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8199 pVCpu->iem.s.aMemMappings[2].fAccess),
8200 VERR_IEM_IPE_9);
8201 }
8202
8203 /*
8204 * Map the memory, checking that we can actually access it. If something
8205 * slightly complicated happens, fall back on bounce buffering.
8206 */
8207 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8208 if (rcStrict != VINF_SUCCESS)
8209 return rcStrict;
8210
8211 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8212 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8213
8214 RTGCPHYS GCPhysFirst;
8215 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8216 if (rcStrict != VINF_SUCCESS)
8217 return rcStrict;
8218
8219 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8220 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8221 if (fAccess & IEM_ACCESS_TYPE_READ)
8222 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8223
8224 void *pvMem;
8225 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8226 if (rcStrict != VINF_SUCCESS)
8227 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8228
8229 /*
8230 * Fill in the mapping table entry.
8231 */
8232 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8233 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8234 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8235 pVCpu->iem.s.cActiveMappings++;
8236
8237 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8238 *ppvMem = pvMem;
8239 return VINF_SUCCESS;
8240}
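
/* Illustrative sketch (not part of the build): the typical map/modify/commit pattern for
   the API above, mirroring what helpers like iemMemStoreDataU16 further down do.  The
   names GCPtrExample and u16Example are placeholders, not identifiers from this file.

       uint16_t    *pu16Dst;
       VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
                                         X86_SREG_DS, GCPtrExample, IEM_ACCESS_DATA_W);
       if (rcStrict == VINF_SUCCESS)
       {
           *pu16Dst = u16Example;        // write thru the direct mapping or bounce buffer
           rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
       }

   If the instruction fails after a successful iemMemMap, the mapping is cleaned up by
   the outer executor via iemMemRollback (see below). */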
8241
8242
8243/**
8244 * Commits the guest memory if bounce buffered and unmaps it.
8245 *
8246 * @returns Strict VBox status code.
8247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8248 * @param pvMem The mapping.
8249 * @param fAccess The kind of access.
8250 */
8251IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8252{
8253 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8254 AssertReturn(iMemMap >= 0, iMemMap);
8255
8256 /* If it's bounce buffered, we may need to write back the buffer. */
8257 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8258 {
8259 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8260 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8261 }
8262 /* Otherwise unlock it. */
8263 else
8264 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8265
8266 /* Free the entry. */
8267 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8268 Assert(pVCpu->iem.s.cActiveMappings != 0);
8269 pVCpu->iem.s.cActiveMappings--;
8270 return VINF_SUCCESS;
8271}
8272
8273#ifdef IEM_WITH_SETJMP
8274
8275/**
8276 * Maps the specified guest memory for the given kind of access, longjmp on
8277 * error.
8278 *
8279 * This may be using bounce buffering of the memory if it's crossing a page
8280 * boundary or if there is an access handler installed for any of it. Because
8281 * of lock prefix guarantees, we're in for some extra clutter when this
8282 * happens.
8283 *
8284 * This may raise a \#GP, \#SS, \#PF or \#AC.
8285 *
8286 * @returns Pointer to the mapped memory.
8287 *
8288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8289 * @param cbMem The number of bytes to map. This is usually 1,
8290 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8291 * string operations it can be up to a page.
8292 * @param iSegReg The index of the segment register to use for
8293 * this access. The base and limits are checked.
8294 * Use UINT8_MAX to indicate that no segmentation
8295 * is required (for IDT, GDT and LDT accesses).
8296 * @param GCPtrMem The address of the guest memory.
8297 * @param fAccess How the memory is being accessed. The
8298 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8299 * how to map the memory, while the
8300 * IEM_ACCESS_WHAT_XXX bit is used when raising
8301 * exceptions.
8302 */
8303IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8304{
8305 /*
8306 * Check the input and figure out which mapping entry to use.
8307 */
8308 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8309 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8310 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8311
8312 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8313 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8314 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8315 {
8316 iMemMap = iemMemMapFindFree(pVCpu);
8317 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8318 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8319 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8320 pVCpu->iem.s.aMemMappings[2].fAccess),
8321 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8322 }
8323
8324 /*
8325 * Map the memory, checking that we can actually access it. If something
8326 * slightly complicated happens, fall back on bounce buffering.
8327 */
8328 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8329 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8330 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8331
8332 /* Crossing a page boundary? */
8333 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8334 { /* No (likely). */ }
8335 else
8336 {
8337 void *pvMem;
8338 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8339 if (rcStrict == VINF_SUCCESS)
8340 return pvMem;
8341 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8342 }
8343
8344 RTGCPHYS GCPhysFirst;
8345 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8346 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8347 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8348
8349 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8350 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8351 if (fAccess & IEM_ACCESS_TYPE_READ)
8352 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8353
8354 void *pvMem;
8355 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8356 if (rcStrict == VINF_SUCCESS)
8357 { /* likely */ }
8358 else
8359 {
8360 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8361 if (rcStrict == VINF_SUCCESS)
8362 return pvMem;
8363 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8364 }
8365
8366 /*
8367 * Fill in the mapping table entry.
8368 */
8369 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8370 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8371 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8372 pVCpu->iem.s.cActiveMappings++;
8373
8374 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8375 return pvMem;
8376}
8377
8378
8379/**
8380 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8381 *
8382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8383 * @param pvMem The mapping.
8384 * @param fAccess The kind of access.
8385 */
8386IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8387{
8388 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8389 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8390
8391 /* If it's bounce buffered, we may need to write back the buffer. */
8392 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8393 {
8394 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8395 {
8396 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8397 if (rcStrict == VINF_SUCCESS)
8398 return;
8399 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8400 }
8401 }
8402 /* Otherwise unlock it. */
8403 else
8404 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8405
8406 /* Free the entry. */
8407 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8408 Assert(pVCpu->iem.s.cActiveMappings != 0);
8409 pVCpu->iem.s.cActiveMappings--;
8410}
8411
8412#endif
8413
8414#ifndef IN_RING3
8415/**
8416 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8417 * buffer part runs into trouble, the commit is postponed to ring-3 (sets FF and stuff).
8418 *
8419 * Allows the instruction to be completed and retired, while the IEM user will
8420 * return to ring-3 immediately afterwards and do the postponed writes there.
8421 *
8422 * @returns VBox status code (no strict statuses). Caller must check
8423 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8425 * @param pvMem The mapping.
8426 * @param fAccess The kind of access.
8427 */
8428IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8429{
8430 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8431 AssertReturn(iMemMap >= 0, iMemMap);
8432
8433 /* If it's bounce buffered, we may need to write back the buffer. */
8434 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8435 {
8436 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8437 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8438 }
8439 /* Otherwise unlock it. */
8440 else
8441 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8442
8443 /* Free the entry. */
8444 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8445 Assert(pVCpu->iem.s.cActiveMappings != 0);
8446 pVCpu->iem.s.cActiveMappings--;
8447 return VINF_SUCCESS;
8448}
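
/* Illustrative sketch (not part of the build): the caller side of the postponing variant
   above.  VMCPU_FF_IEM is the force flag mentioned in the function docs; VMCPU_FF_IS_SET
   is assumed to be the usual VMCPU force-flag test macro.

       rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
       if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
       {
           // Trouble was postponed: retire this instruction, but do not start another
           // string iteration; return to ring-3 so the postponed writes get done there.
       }
*/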
8449#endif
8450
8451
8452/**
8453 * Rolls back mappings, releasing page locks and such.
8454 *
8455 * The caller shall only call this after checking cActiveMappings.
8456 *
8457 *
8458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8459 */
8460IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8461{
8462 Assert(pVCpu->iem.s.cActiveMappings > 0);
8463
8464 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8465 while (iMemMap-- > 0)
8466 {
8467 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8468 if (fAccess != IEM_ACCESS_INVALID)
8469 {
8470 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8471 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8472 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8473 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8474 Assert(pVCpu->iem.s.cActiveMappings > 0);
8475 pVCpu->iem.s.cActiveMappings--;
8476 }
8477 }
8478}
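
/* Illustrative sketch (not part of the build): how the outer instruction executors are
   expected to use iemMemRollback, honouring the cActiveMappings precondition above.
   rcStrict stands for whatever failure status the instruction produced.

       if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
           iemMemRollback(pVCpu);
*/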
8479
8480
8481/**
8482 * Fetches a data byte.
8483 *
8484 * @returns Strict VBox status code.
8485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8486 * @param pu8Dst Where to return the byte.
8487 * @param iSegReg The index of the segment register to use for
8488 * this access. The base and limits are checked.
8489 * @param GCPtrMem The address of the guest memory.
8490 */
8491IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8492{
8493 /* The lazy approach for now... */
8494 uint8_t const *pu8Src;
8495 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8496 if (rc == VINF_SUCCESS)
8497 {
8498 *pu8Dst = *pu8Src;
8499 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8500 }
8501 return rc;
8502}
8503
8504
8505#ifdef IEM_WITH_SETJMP
8506/**
8507 * Fetches a data byte, longjmp on error.
8508 *
8509 * @returns The byte.
8510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8511 * @param iSegReg The index of the segment register to use for
8512 * this access. The base and limits are checked.
8513 * @param GCPtrMem The address of the guest memory.
8514 */
8515DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8516{
8517 /* The lazy approach for now... */
8518 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8519 uint8_t const bRet = *pu8Src;
8520 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8521 return bRet;
8522}
8523#endif /* IEM_WITH_SETJMP */
8524
8525
8526/**
8527 * Fetches a data word.
8528 *
8529 * @returns Strict VBox status code.
8530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8531 * @param pu16Dst Where to return the word.
8532 * @param iSegReg The index of the segment register to use for
8533 * this access. The base and limits are checked.
8534 * @param GCPtrMem The address of the guest memory.
8535 */
8536IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8537{
8538 /* The lazy approach for now... */
8539 uint16_t const *pu16Src;
8540 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8541 if (rc == VINF_SUCCESS)
8542 {
8543 *pu16Dst = *pu16Src;
8544 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8545 }
8546 return rc;
8547}
8548
8549
8550#ifdef IEM_WITH_SETJMP
8551/**
8552 * Fetches a data word, longjmp on error.
8553 *
8554 * @returns The word
8555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8556 * @param iSegReg The index of the segment register to use for
8557 * this access. The base and limits are checked.
8558 * @param GCPtrMem The address of the guest memory.
8559 */
8560DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8561{
8562 /* The lazy approach for now... */
8563 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8564 uint16_t const u16Ret = *pu16Src;
8565 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8566 return u16Ret;
8567}
8568#endif
8569
8570
8571/**
8572 * Fetches a data dword.
8573 *
8574 * @returns Strict VBox status code.
8575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8576 * @param pu32Dst Where to return the dword.
8577 * @param iSegReg The index of the segment register to use for
8578 * this access. The base and limits are checked.
8579 * @param GCPtrMem The address of the guest memory.
8580 */
8581IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8582{
8583 /* The lazy approach for now... */
8584 uint32_t const *pu32Src;
8585 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8586 if (rc == VINF_SUCCESS)
8587 {
8588 *pu32Dst = *pu32Src;
8589 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8590 }
8591 return rc;
8592}
8593
8594
8595#ifdef IEM_WITH_SETJMP
8596
8597IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8598{
8599 Assert(cbMem >= 1);
8600 Assert(iSegReg < X86_SREG_COUNT);
8601
8602 /*
8603 * 64-bit mode is simpler.
8604 */
8605 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8606 {
8607 if (iSegReg >= X86_SREG_FS)
8608 {
8609 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8610 GCPtrMem += pSel->u64Base;
8611 }
8612
8613 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8614 return GCPtrMem;
8615 }
8616 /*
8617 * 16-bit and 32-bit segmentation.
8618 */
8619 else
8620 {
8621 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8622 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8623 == X86DESCATTR_P /* data, expand up */
8624 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8625 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8626 {
8627 /* expand up */
8628 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
8629 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8630 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8631 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8632 }
8633 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8634 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8635 {
8636 /* expand down */
8637 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8638 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8639 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8640 && GCPtrLast32 > (uint32_t)GCPtrMem))
8641 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8642 }
8643 else
8644 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8645 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8646 }
8647 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8648}
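
/* Illustrative sketch (not part of the build): worked limit-check examples for the
   segmentation handling above, based on the architectural rules.

       // Expand-up data segment, limit 0x1fff: valid offsets are 0x0000..0x1fff.
       //   4 byte access at 0x1ffc -> last byte 0x1fff -> within bounds.
       //   4 byte access at 0x1ffe -> last byte 0x2001 -> selector bounds fault.
       // Expand-down data segment, limit 0x1fff, D=0: valid offsets are 0x2000..0xffff.
       //   2 byte access at 0x2000 -> within bounds.
       //   2 byte access at 0x1fff -> selector bounds fault.
*/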
8649
8650
8651IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8652{
8653 Assert(cbMem >= 1);
8654 Assert(iSegReg < X86_SREG_COUNT);
8655
8656 /*
8657 * 64-bit mode is simpler.
8658 */
8659 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8660 {
8661 if (iSegReg >= X86_SREG_FS)
8662 {
8663 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8664 GCPtrMem += pSel->u64Base;
8665 }
8666
8667 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8668 return GCPtrMem;
8669 }
8670 /*
8671 * 16-bit and 32-bit segmentation.
8672 */
8673 else
8674 {
8675 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8676 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8677 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8678 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8679 {
8680 /* expand up */
8681 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
8682 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8683 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8684 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8685 }
8686 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8687 {
8688 /* expand down */
8689 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8690 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8691 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8692 && GCPtrLast32 > (uint32_t)GCPtrMem))
8693 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8694 }
8695 else
8696 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8697 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8698 }
8699 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8700}
8701
8702
8703/**
8704 * Fetches a data dword, longjmp on error, fallback/safe version.
8705 *
8706 * @returns The dword
8707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8708 * @param iSegReg The index of the segment register to use for
8709 * this access. The base and limits are checked.
8710 * @param GCPtrMem The address of the guest memory.
8711 */
8712IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8713{
8714 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8715 uint32_t const u32Ret = *pu32Src;
8716 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8717 return u32Ret;
8718}
8719
8720
8721/**
8722 * Fetches a data dword, longjmp on error.
8723 *
8724 * @returns The dword
8725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8726 * @param iSegReg The index of the segment register to use for
8727 * this access. The base and limits are checked.
8728 * @param GCPtrMem The address of the guest memory.
8729 */
8730DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8731{
8732# ifdef IEM_WITH_DATA_TLB
8733 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8734 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8735 {
8736 /// @todo more later.
8737 }
8738
8739 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8740# else
8741 /* The lazy approach. */
8742 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8743 uint32_t const u32Ret = *pu32Src;
8744 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8745 return u32Ret;
8746# endif
8747}
8748#endif
8749
8750
8751#ifdef SOME_UNUSED_FUNCTION
8752/**
8753 * Fetches a data dword and sign extends it to a qword.
8754 *
8755 * @returns Strict VBox status code.
8756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8757 * @param pu64Dst Where to return the sign extended value.
8758 * @param iSegReg The index of the segment register to use for
8759 * this access. The base and limits are checked.
8760 * @param GCPtrMem The address of the guest memory.
8761 */
8762IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8763{
8764 /* The lazy approach for now... */
8765 int32_t const *pi32Src;
8766 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8767 if (rc == VINF_SUCCESS)
8768 {
8769 *pu64Dst = *pi32Src;
8770 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8771 }
8772#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8773 else
8774 *pu64Dst = 0;
8775#endif
8776 return rc;
8777}
8778#endif
8779
8780
8781/**
8782 * Fetches a data qword.
8783 *
8784 * @returns Strict VBox status code.
8785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8786 * @param pu64Dst Where to return the qword.
8787 * @param iSegReg The index of the segment register to use for
8788 * this access. The base and limits are checked.
8789 * @param GCPtrMem The address of the guest memory.
8790 */
8791IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8792{
8793 /* The lazy approach for now... */
8794 uint64_t const *pu64Src;
8795 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8796 if (rc == VINF_SUCCESS)
8797 {
8798 *pu64Dst = *pu64Src;
8799 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8800 }
8801 return rc;
8802}
8803
8804
8805#ifdef IEM_WITH_SETJMP
8806/**
8807 * Fetches a data qword, longjmp on error.
8808 *
8809 * @returns The qword.
8810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8811 * @param iSegReg The index of the segment register to use for
8812 * this access. The base and limits are checked.
8813 * @param GCPtrMem The address of the guest memory.
8814 */
8815DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8816{
8817 /* The lazy approach for now... */
8818 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8819 uint64_t const u64Ret = *pu64Src;
8820 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8821 return u64Ret;
8822}
8823#endif
8824
8825
8826/**
8827 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8828 *
8829 * @returns Strict VBox status code.
8830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8831 * @param pu64Dst Where to return the qword.
8832 * @param iSegReg The index of the segment register to use for
8833 * this access. The base and limits are checked.
8834 * @param GCPtrMem The address of the guest memory.
8835 */
8836IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8837{
8838 /* The lazy approach for now... */
8839 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8840 if (RT_UNLIKELY(GCPtrMem & 15))
8841 return iemRaiseGeneralProtectionFault0(pVCpu);
8842
8843 uint64_t const *pu64Src;
8844 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8845 if (rc == VINF_SUCCESS)
8846 {
8847 *pu64Dst = *pu64Src;
8848 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8849 }
8850 return rc;
8851}
8852
8853
8854#ifdef IEM_WITH_SETJMP
8855/**
8856 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8857 *
8858 * @returns The qword.
8859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8860 * @param iSegReg The index of the segment register to use for
8861 * this access. The base and limits are checked.
8862 * @param GCPtrMem The address of the guest memory.
8863 */
8864DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8865{
8866 /* The lazy approach for now... */
8867 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8868 if (RT_LIKELY(!(GCPtrMem & 15)))
8869 {
8870 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8871 uint64_t const u64Ret = *pu64Src;
8872 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8873 return u64Ret;
8874 }
8875
8876 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
8877 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
8878}
8879#endif
8880
8881
8882/**
8883 * Fetches a data tword.
8884 *
8885 * @returns Strict VBox status code.
8886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8887 * @param pr80Dst Where to return the tword.
8888 * @param iSegReg The index of the segment register to use for
8889 * this access. The base and limits are checked.
8890 * @param GCPtrMem The address of the guest memory.
8891 */
8892IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8893{
8894 /* The lazy approach for now... */
8895 PCRTFLOAT80U pr80Src;
8896 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8897 if (rc == VINF_SUCCESS)
8898 {
8899 *pr80Dst = *pr80Src;
8900 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8901 }
8902 return rc;
8903}
8904
8905
8906#ifdef IEM_WITH_SETJMP
8907/**
8908 * Fetches a data tword, longjmp on error.
8909 *
8910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8911 * @param pr80Dst Where to return the tword.
8912 * @param iSegReg The index of the segment register to use for
8913 * this access. The base and limits are checked.
8914 * @param GCPtrMem The address of the guest memory.
8915 */
8916DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8917{
8918 /* The lazy approach for now... */
8919 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8920 *pr80Dst = *pr80Src;
8921 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8922}
8923#endif
8924
8925
8926/**
8927 * Fetches a data dqword (double qword), generally SSE related.
8928 *
8929 * @returns Strict VBox status code.
8930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8931 * @param pu128Dst Where to return the dqword.
8932 * @param iSegReg The index of the segment register to use for
8933 * this access. The base and limits are checked.
8934 * @param GCPtrMem The address of the guest memory.
8935 */
8936IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8937{
8938 /* The lazy approach for now... */
8939 uint128_t const *pu128Src;
8940 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8941 if (rc == VINF_SUCCESS)
8942 {
8943 *pu128Dst = *pu128Src;
8944 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8945 }
8946 return rc;
8947}
8948
8949
8950#ifdef IEM_WITH_SETJMP
8951/**
8952 * Fetches a data dqword (double qword), generally SSE related.
8953 *
8954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8955 * @param pu128Dst Where to return the dqword.
8956 * @param iSegReg The index of the segment register to use for
8957 * this access. The base and limits are checked.
8958 * @param GCPtrMem The address of the guest memory.
8959 */
8960IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8961{
8962 /* The lazy approach for now... */
8963 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8964 *pu128Dst = *pu128Src;
8965 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8966}
8967#endif
8968
8969
8970/**
8971 * Fetches a data dqword (double qword) at an aligned address, generally SSE
8972 * related.
8973 *
8974 * Raises \#GP(0) if not aligned.
8975 *
8976 * @returns Strict VBox status code.
8977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8978 * @param pu128Dst Where to return the dqword.
8979 * @param iSegReg The index of the segment register to use for
8980 * this access. The base and limits are checked.
8981 * @param GCPtrMem The address of the guest memory.
8982 */
8983IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8984{
8985 /* The lazy approach for now... */
8986 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8987 if ( (GCPtrMem & 15)
8988 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
8989 return iemRaiseGeneralProtectionFault0(pVCpu);
8990
8991 uint128_t const *pu128Src;
8992 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8993 if (rc == VINF_SUCCESS)
8994 {
8995 *pu128Dst = *pu128Src;
8996 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8997 }
8998 return rc;
8999}
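
/* Illustrative sketch (not part of the build): the alignment rule enforced above; only
   the low four bits of the effective address matter.

       // GCPtrMem = 0x....7f60 -> (GCPtrMem & 15) == 0 -> the access proceeds.
       // GCPtrMem = 0x....7f68 -> (GCPtrMem & 15) == 8 -> #GP(0), unless MXCSR.MM
       //                          (the AMD misaligned SSE mode bit) is set.
*/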
9000
9001
9002#ifdef IEM_WITH_SETJMP
9003/**
9004 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9005 * related, longjmp on error.
9006 *
9007 * Raises \#GP(0) if not aligned.
9008 *
9009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9010 * @param pu128Dst Where to return the dqword.
9011 * @param iSegReg The index of the segment register to use for
9012 * this access. The base and limits are checked.
9013 * @param GCPtrMem The address of the guest memory.
9014 */
9015DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9016{
9017 /* The lazy approach for now... */
9018 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9019 if ( (GCPtrMem & 15) == 0
9020 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9021 {
9022 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9023 IEM_ACCESS_DATA_R);
9024 *pu128Dst = *pu128Src;
9025 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9026 return;
9027 }
9028
9029 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9030 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9031}
9032#endif
9033
9034
9035
9036/**
9037 * Fetches a descriptor register (lgdt, lidt).
9038 *
9039 * @returns Strict VBox status code.
9040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9041 * @param pcbLimit Where to return the limit.
9042 * @param pGCPtrBase Where to return the base.
9043 * @param iSegReg The index of the segment register to use for
9044 * this access. The base and limits are checked.
9045 * @param GCPtrMem The address of the guest memory.
9046 * @param enmOpSize The effective operand size.
9047 */
9048IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9049 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9050{
9051 /*
9052 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9053 * little special:
9054 * - The two reads are done separately.
9055 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9056 * - We suspect the 386 to actually commit the limit before the base in
9057 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9058 * don't try to emulate this eccentric behavior, because it's not well
9059 * enough understood and rather hard to trigger.
9060 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9061 */
9062 VBOXSTRICTRC rcStrict;
9063 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9064 {
9065 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9066 if (rcStrict == VINF_SUCCESS)
9067 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9068 }
9069 else
9070 {
9071 uint32_t uTmp;
9072 if (enmOpSize == IEMMODE_32BIT)
9073 {
9074 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9075 {
9076 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9077 if (rcStrict == VINF_SUCCESS)
9078 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9079 }
9080 else
9081 {
9082 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9083 if (rcStrict == VINF_SUCCESS)
9084 {
9085 *pcbLimit = (uint16_t)uTmp;
9086 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9087 }
9088 }
9089 if (rcStrict == VINF_SUCCESS)
9090 *pGCPtrBase = uTmp;
9091 }
9092 else
9093 {
9094 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9095 if (rcStrict == VINF_SUCCESS)
9096 {
9097 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9098 if (rcStrict == VINF_SUCCESS)
9099 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9100 }
9101 }
9102 }
9103 return rcStrict;
9104}
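
/* Illustrative sketch (not part of the build): what the LGDT/LIDT fetch above reads from
   the pseudo-descriptor at GCPtrMem, depending on mode and operand size.

       // 16-bit opsize:  limit = word [GCPtrMem], base = dword [GCPtrMem+2] & 0x00ffffff
       // 32-bit opsize:  limit = word [GCPtrMem], base = dword [GCPtrMem+2]
       //                 (a 486 target reads a dword for the limit and keeps the low word)
       // 64-bit mode:    limit = word [GCPtrMem], base = qword [GCPtrMem+2]
*/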
9105
9106
9107
9108/**
9109 * Stores a data byte.
9110 *
9111 * @returns Strict VBox status code.
9112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9113 * @param iSegReg The index of the segment register to use for
9114 * this access. The base and limits are checked.
9115 * @param GCPtrMem The address of the guest memory.
9116 * @param u8Value The value to store.
9117 */
9118IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9119{
9120 /* The lazy approach for now... */
9121 uint8_t *pu8Dst;
9122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9123 if (rc == VINF_SUCCESS)
9124 {
9125 *pu8Dst = u8Value;
9126 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9127 }
9128 return rc;
9129}
9130
9131
9132#ifdef IEM_WITH_SETJMP
9133/**
9134 * Stores a data byte, longjmp on error.
9135 *
9136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9137 * @param iSegReg The index of the segment register to use for
9138 * this access. The base and limits are checked.
9139 * @param GCPtrMem The address of the guest memory.
9140 * @param u8Value The value to store.
9141 */
9142IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9143{
9144 /* The lazy approach for now... */
9145 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9146 *pu8Dst = u8Value;
9147 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9148}
9149#endif
9150
9151
9152/**
9153 * Stores a data word.
9154 *
9155 * @returns Strict VBox status code.
9156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9157 * @param iSegReg The index of the segment register to use for
9158 * this access. The base and limits are checked.
9159 * @param GCPtrMem The address of the guest memory.
9160 * @param u16Value The value to store.
9161 */
9162IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9163{
9164 /* The lazy approach for now... */
9165 uint16_t *pu16Dst;
9166 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9167 if (rc == VINF_SUCCESS)
9168 {
9169 *pu16Dst = u16Value;
9170 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9171 }
9172 return rc;
9173}
9174
9175
9176#ifdef IEM_WITH_SETJMP
9177/**
9178 * Stores a data word, longjmp on error.
9179 *
9180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9181 * @param iSegReg The index of the segment register to use for
9182 * this access. The base and limits are checked.
9183 * @param GCPtrMem The address of the guest memory.
9184 * @param u16Value The value to store.
9185 */
9186IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9187{
9188 /* The lazy approach for now... */
9189 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9190 *pu16Dst = u16Value;
9191 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9192}
9193#endif
9194
9195
9196/**
9197 * Stores a data dword.
9198 *
9199 * @returns Strict VBox status code.
9200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9201 * @param iSegReg The index of the segment register to use for
9202 * this access. The base and limits are checked.
9203 * @param GCPtrMem The address of the guest memory.
9204 * @param u32Value The value to store.
9205 */
9206IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9207{
9208 /* The lazy approach for now... */
9209 uint32_t *pu32Dst;
9210 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9211 if (rc == VINF_SUCCESS)
9212 {
9213 *pu32Dst = u32Value;
9214 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9215 }
9216 return rc;
9217}
9218
9219
9220#ifdef IEM_WITH_SETJMP
9221/**
9222 * Stores a data dword, longjmp on error.
9223 *
9224 *
9225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9226 * @param iSegReg The index of the segment register to use for
9227 * this access. The base and limits are checked.
9228 * @param GCPtrMem The address of the guest memory.
9229 * @param u32Value The value to store.
9230 */
9231IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9232{
9233 /* The lazy approach for now... */
9234 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9235 *pu32Dst = u32Value;
9236 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9237}
9238#endif
9239
9240
9241/**
9242 * Stores a data qword.
9243 *
9244 * @returns Strict VBox status code.
9245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9246 * @param iSegReg The index of the segment register to use for
9247 * this access. The base and limits are checked.
9248 * @param GCPtrMem The address of the guest memory.
9249 * @param u64Value The value to store.
9250 */
9251IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9252{
9253 /* The lazy approach for now... */
9254 uint64_t *pu64Dst;
9255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9256 if (rc == VINF_SUCCESS)
9257 {
9258 *pu64Dst = u64Value;
9259 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9260 }
9261 return rc;
9262}
9263
9264
9265#ifdef IEM_WITH_SETJMP
9266/**
9267 * Stores a data qword, longjmp on error.
9268 *
9269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9270 * @param iSegReg The index of the segment register to use for
9271 * this access. The base and limits are checked.
9272 * @param GCPtrMem The address of the guest memory.
9273 * @param u64Value The value to store.
9274 */
9275IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9276{
9277 /* The lazy approach for now... */
9278 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9279 *pu64Dst = u64Value;
9280 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9281}
9282#endif
9283
9284
9285/**
9286 * Stores a data dqword.
9287 *
9288 * @returns Strict VBox status code.
9289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9290 * @param iSegReg The index of the segment register to use for
9291 * this access. The base and limits are checked.
9292 * @param GCPtrMem The address of the guest memory.
9293 * @param u128Value The value to store.
9294 */
9295IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9296{
9297 /* The lazy approach for now... */
9298 uint128_t *pu128Dst;
9299 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9300 if (rc == VINF_SUCCESS)
9301 {
9302 *pu128Dst = u128Value;
9303 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9304 }
9305 return rc;
9306}
9307
9308
9309#ifdef IEM_WITH_SETJMP
9310/**
9311 * Stores a data dqword, longjmp on error.
9312 *
9313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9314 * @param iSegReg The index of the segment register to use for
9315 * this access. The base and limits are checked.
9316 * @param GCPtrMem The address of the guest memory.
9317 * @param u128Value The value to store.
9318 */
9319IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9320{
9321 /* The lazy approach for now... */
9322 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9323 *pu128Dst = u128Value;
9324 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9325}
9326#endif
9327
9328
9329/**
9330 * Stores a data dqword, SSE aligned.
9331 *
9332 * @returns Strict VBox status code.
9333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9334 * @param iSegReg The index of the segment register to use for
9335 * this access. The base and limits are checked.
9336 * @param GCPtrMem The address of the guest memory.
9337 * @param u128Value The value to store.
9338 */
9339IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9340{
9341 /* The lazy approach for now... */
9342 if ( (GCPtrMem & 15)
9343 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9344 return iemRaiseGeneralProtectionFault0(pVCpu);
9345
9346 uint128_t *pu128Dst;
9347 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9348 if (rc == VINF_SUCCESS)
9349 {
9350 *pu128Dst = u128Value;
9351 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9352 }
9353 return rc;
9354}
9355
9356
9357#ifdef IEM_WITH_SETJMP
9358/**
9359 * Stores a data dqword, SSE aligned, longjmp on error.
9360 *
9361 *
9362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9363 * @param iSegReg The index of the segment register to use for
9364 * this access. The base and limits are checked.
9365 * @param GCPtrMem The address of the guest memory.
9366 * @param u128Value The value to store.
9367 */
9368DECL_NO_INLINE(IEM_STATIC, void)
9369iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9370{
9371 /* The lazy approach for now... */
9372 if ( (GCPtrMem & 15) == 0
9373 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9374 {
9375 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9376 *pu128Dst = u128Value;
9377 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9378 return;
9379 }
9380
9381 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9382 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9383}
9384#endif
9385
9386
9387/**
9388 * Stores a descriptor register (sgdt, sidt).
9389 *
9390 * @returns Strict VBox status code.
9391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9392 * @param cbLimit The limit.
9393 * @param GCPtrBase The base address.
9394 * @param iSegReg The index of the segment register to use for
9395 * this access. The base and limits are checked.
9396 * @param GCPtrMem The address of the guest memory.
9397 */
9398IEM_STATIC VBOXSTRICTRC
9399iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9400{
9401 /*
9402 * The SIDT and SGDT instructions actually store the data using two
9403 * independent writes. The instructions do not respond to opsize prefixes.
9404 */
9405 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9406 if (rcStrict == VINF_SUCCESS)
9407 {
9408 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9409 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9410 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9411 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9412 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9413 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9414 else
9415 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9416 }
9417 return rcStrict;
9418}
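
/*
 * Illustrative sketch (an assumption for clarity, not part of the build): the
 * memory image iemMemStoreDataXdtr produces, i.e. a 16-bit limit followed by
 * the base at offset 2, whose width depends on the CPU mode as per the code
 * above. The structure names are made up for the example.
 */
#if 0
# pragma pack(1)
typedef struct EXAMPLEXDTR32 { uint16_t cbLimit; uint32_t uBase; } EXAMPLEXDTR32; /* sgdt/sidt outside long mode */
typedef struct EXAMPLEXDTR64 { uint16_t cbLimit; uint64_t uBase; } EXAMPLEXDTR64; /* sgdt/sidt in long mode */
# pragma pack()
#endif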
9419
9420
9421/**
9422 * Pushes a word onto the stack.
9423 *
9424 * @returns Strict VBox status code.
9425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9426 * @param u16Value The value to push.
9427 */
9428IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9429{
9430 /* Decrement the stack pointer. */
9431 uint64_t uNewRsp;
9432 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9433 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9434
9435 /* Write the word the lazy way. */
9436 uint16_t *pu16Dst;
9437 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9438 if (rc == VINF_SUCCESS)
9439 {
9440 *pu16Dst = u16Value;
9441 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9442 }
9443
9444 /* Commit the new RSP value unless an access handler made trouble. */
9445 if (rc == VINF_SUCCESS)
9446 pCtx->rsp = uNewRsp;
9447
9448 return rc;
9449}
9450
9451
9452/**
9453 * Pushes a dword onto the stack.
9454 *
9455 * @returns Strict VBox status code.
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param u32Value The value to push.
9458 */
9459IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9460{
9461 /* Decrement the stack pointer. */
9462 uint64_t uNewRsp;
9463 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9464 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9465
9466 /* Write the dword the lazy way. */
9467 uint32_t *pu32Dst;
9468 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9469 if (rc == VINF_SUCCESS)
9470 {
9471 *pu32Dst = u32Value;
9472 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9473 }
9474
9475 /* Commit the new RSP value unless an access handler made trouble. */
9476 if (rc == VINF_SUCCESS)
9477 pCtx->rsp = uNewRsp;
9478
9479 return rc;
9480}
9481
9482
9483/**
9484 * Pushes a dword segment register value onto the stack.
9485 *
9486 * @returns Strict VBox status code.
9487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9488 * @param u32Value The value to push.
9489 */
9490IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9491{
9492 /* Decrement the stack pointer. */
9493 uint64_t uNewRsp;
9494 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9495 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9496
9497 VBOXSTRICTRC rc;
9498 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9499 {
9500 /* The recompiler writes a full dword. */
9501 uint32_t *pu32Dst;
9502 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9503 if (rc == VINF_SUCCESS)
9504 {
9505 *pu32Dst = u32Value;
9506 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9507 }
9508 }
9509 else
9510 {
9511 /* The Intel docs talk about zero extending the selector register
9512 value. My actual Intel CPU here might be zero extending the value,
9513 but it still only writes the lower word (see the sketch below)... */
9514 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9515 * happens when crossing a page boundary: is the high word checked
9516 * for write accessibility or not? Probably it is. What about segment limits?
9517 * It appears this behavior is also shared with trap error codes.
9518 *
9519 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
9520 * Check on ancient hardware to find out when it actually changed. */
9521 uint16_t *pu16Dst;
9522 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9523 if (rc == VINF_SUCCESS)
9524 {
9525 *pu16Dst = (uint16_t)u32Value;
9526 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9527 }
9528 }
9529
9530 /* Commit the new RSP value unless an access handler made trouble. */
9531 if (rc == VINF_SUCCESS)
9532 pCtx->rsp = uNewRsp;
9533
9534 return rc;
9535}
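
/*
 * Sketch of what the non-verification path above means in practice (the values
 * are made up, not taken from the source): a 32-bit "push fs" only rewrites the
 * low word of the 4-byte stack slot on the CPUs observed, while ESP still drops
 * by 4.
 */
#if 0
    /* before:  ESP = 0x1000, dword at 0x0ffc = 0xdeadbeef, FS = 0x0023 */
    VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, 0x0023);
    /* after:   ESP = 0x0ffc, dword at 0x0ffc = 0xdead0023 (high word untouched) */
#endif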
9536
9537
9538/**
9539 * Pushes a qword onto the stack.
9540 *
9541 * @returns Strict VBox status code.
9542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9543 * @param u64Value The value to push.
9544 */
9545IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9546{
9547 /* Decrement the stack pointer. */
9548 uint64_t uNewRsp;
9549 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9550 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9551
9552 /* Write the qword the lazy way. */
9553 uint64_t *pu64Dst;
9554 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9555 if (rc == VINF_SUCCESS)
9556 {
9557 *pu64Dst = u64Value;
9558 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9559 }
9560
9561 /* Commit the new RSP value unless an access handler made trouble. */
9562 if (rc == VINF_SUCCESS)
9563 pCtx->rsp = uNewRsp;
9564
9565 return rc;
9566}
9567
9568
9569/**
9570 * Pops a word from the stack.
9571 *
9572 * @returns Strict VBox status code.
9573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9574 * @param pu16Value Where to store the popped value.
9575 */
9576IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9577{
9578 /* Increment the stack pointer. */
9579 uint64_t uNewRsp;
9580 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9581 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9582
9583 /* Read the word the lazy way. */
9584 uint16_t const *pu16Src;
9585 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9586 if (rc == VINF_SUCCESS)
9587 {
9588 *pu16Value = *pu16Src;
9589 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9590
9591 /* Commit the new RSP value. */
9592 if (rc == VINF_SUCCESS)
9593 pCtx->rsp = uNewRsp;
9594 }
9595
9596 return rc;
9597}
9598
9599
9600/**
9601 * Pops a dword from the stack.
9602 *
9603 * @returns Strict VBox status code.
9604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9605 * @param pu32Value Where to store the popped value.
9606 */
9607IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9608{
9609 /* Increment the stack pointer. */
9610 uint64_t uNewRsp;
9611 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9612 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9613
9614 /* Read the dword the lazy way. */
9615 uint32_t const *pu32Src;
9616 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9617 if (rc == VINF_SUCCESS)
9618 {
9619 *pu32Value = *pu32Src;
9620 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9621
9622 /* Commit the new RSP value. */
9623 if (rc == VINF_SUCCESS)
9624 pCtx->rsp = uNewRsp;
9625 }
9626
9627 return rc;
9628}
9629
9630
9631/**
9632 * Pops a qword from the stack.
9633 *
9634 * @returns Strict VBox status code.
9635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9636 * @param pu64Value Where to store the popped value.
9637 */
9638IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9639{
9640 /* Increment the stack pointer. */
9641 uint64_t uNewRsp;
9642 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9643 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9644
9645 /* Read the qword the lazy way. */
9646 uint64_t const *pu64Src;
9647 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9648 if (rc == VINF_SUCCESS)
9649 {
9650 *pu64Value = *pu64Src;
9651 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9652
9653 /* Commit the new RSP value. */
9654 if (rc == VINF_SUCCESS)
9655 pCtx->rsp = uNewRsp;
9656 }
9657
9658 return rc;
9659}
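
/*
 * Minimal usage sketch (an assumption, not taken from the surrounding code):
 * how an instruction implementation typically pairs these helpers, only
 * continuing while the strict status code is VINF_SUCCESS so that RSP stays
 * untouched when an access fails.
 */
#if 0
    uint16_t     u16Tmp   = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, 0x1234);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16(pVCpu, &u16Tmp); /* u16Tmp == 0x1234 on success */
    return rcStrict;
#endif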
9660
9661
9662/**
9663 * Pushes a word onto the stack, using a temporary stack pointer.
9664 *
9665 * @returns Strict VBox status code.
9666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9667 * @param u16Value The value to push.
9668 * @param pTmpRsp Pointer to the temporary stack pointer.
9669 */
9670IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9671{
9672 /* Decrement the stack pointer. */
9673 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9674 RTUINT64U NewRsp = *pTmpRsp;
9675 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9676
9677 /* Write the word the lazy way. */
9678 uint16_t *pu16Dst;
9679 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9680 if (rc == VINF_SUCCESS)
9681 {
9682 *pu16Dst = u16Value;
9683 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9684 }
9685
9686 /* Commit the new RSP value unless an access handler made trouble. */
9687 if (rc == VINF_SUCCESS)
9688 *pTmpRsp = NewRsp;
9689
9690 return rc;
9691}
9692
9693
9694/**
9695 * Pushes a dword onto the stack, using a temporary stack pointer.
9696 *
9697 * @returns Strict VBox status code.
9698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9699 * @param u32Value The value to push.
9700 * @param pTmpRsp Pointer to the temporary stack pointer.
9701 */
9702IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9703{
9704 /* Decrement the stack pointer. */
9705 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9706 RTUINT64U NewRsp = *pTmpRsp;
9707 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9708
9709 /* Write the dword the lazy way. */
9710 uint32_t *pu32Dst;
9711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9712 if (rc == VINF_SUCCESS)
9713 {
9714 *pu32Dst = u32Value;
9715 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9716 }
9717
9718 /* Commit the new RSP value unless an access handler made trouble. */
9719 if (rc == VINF_SUCCESS)
9720 *pTmpRsp = NewRsp;
9721
9722 return rc;
9723}
9724
9725
9726/**
9727 * Pushes a qword onto the stack, using a temporary stack pointer.
9728 *
9729 * @returns Strict VBox status code.
9730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9731 * @param u64Value The value to push.
9732 * @param pTmpRsp Pointer to the temporary stack pointer.
9733 */
9734IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9735{
9736 /* Decrement the stack pointer. */
9737 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9738 RTUINT64U NewRsp = *pTmpRsp;
9739 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9740
9741 /* Write the qword the lazy way. */
9742 uint64_t *pu64Dst;
9743 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9744 if (rc == VINF_SUCCESS)
9745 {
9746 *pu64Dst = u64Value;
9747 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9748 }
9749
9750 /* Commit the new RSP value unless an access handler made trouble. */
9751 if (rc == VINF_SUCCESS)
9752 *pTmpRsp = NewRsp;
9753
9754 return rc;
9755}
9756
9757
9758/**
9759 * Pops a word from the stack, using a temporary stack pointer.
9760 *
9761 * @returns Strict VBox status code.
9762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9763 * @param pu16Value Where to store the popped value.
9764 * @param pTmpRsp Pointer to the temporary stack pointer.
9765 */
9766IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9767{
9768 /* Increment the stack pointer. */
9769 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9770 RTUINT64U NewRsp = *pTmpRsp;
9771 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9772
9773 /* Read the word the lazy way. */
9774 uint16_t const *pu16Src;
9775 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9776 if (rc == VINF_SUCCESS)
9777 {
9778 *pu16Value = *pu16Src;
9779 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9780
9781 /* Commit the new RSP value. */
9782 if (rc == VINF_SUCCESS)
9783 *pTmpRsp = NewRsp;
9784 }
9785
9786 return rc;
9787}
9788
9789
9790/**
9791 * Pops a dword from the stack, using a temporary stack pointer.
9792 *
9793 * @returns Strict VBox status code.
9794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9795 * @param pu32Value Where to store the popped value.
9796 * @param pTmpRsp Pointer to the temporary stack pointer.
9797 */
9798IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9799{
9800 /* Increment the stack pointer. */
9801 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9802 RTUINT64U NewRsp = *pTmpRsp;
9803 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9804
9805 /* Read the dword the lazy way. */
9806 uint32_t const *pu32Src;
9807 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9808 if (rc == VINF_SUCCESS)
9809 {
9810 *pu32Value = *pu32Src;
9811 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9812
9813 /* Commit the new RSP value. */
9814 if (rc == VINF_SUCCESS)
9815 *pTmpRsp = NewRsp;
9816 }
9817
9818 return rc;
9819}
9820
9821
9822/**
9823 * Pops a qword from the stack, using a temporary stack pointer.
9824 *
9825 * @returns Strict VBox status code.
9826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9827 * @param pu64Value Where to store the popped value.
9828 * @param pTmpRsp Pointer to the temporary stack pointer.
9829 */
9830IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9831{
9832 /* Increment the stack pointer. */
9833 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9834 RTUINT64U NewRsp = *pTmpRsp;
9835 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9836
9837 /* Read the qword the lazy way. */
9838 uint64_t const *pu64Src;
9839 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9840 if (rcStrict == VINF_SUCCESS)
9841 {
9842 *pu64Value = *pu64Src;
9843 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9844
9845 /* Commit the new RSP value. */
9846 if (rcStrict == VINF_SUCCESS)
9847 *pTmpRsp = NewRsp;
9848 }
9849
9850 return rcStrict;
9851}
9852
9853
9854/**
9855 * Begins a special stack push (used by interrupts, exceptions and such).
9856 *
9857 * This will raise \#SS or \#PF if appropriate.
9858 *
9859 * @returns Strict VBox status code.
9860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9861 * @param cbMem The number of bytes to push onto the stack.
9862 * @param ppvMem Where to return the pointer to the stack memory.
9863 * As with the other memory functions this could be
9864 * direct access or bounce buffered access, so
9865 * don't commit any register state until the commit call
9866 * succeeds.
9867 * @param puNewRsp Where to return the new RSP value. This must be
9868 * passed unchanged to
9869 * iemMemStackPushCommitSpecial().
9870 */
9871IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
9872{
9873 Assert(cbMem < UINT8_MAX);
9874 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9875 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9876 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9877}
9878
9879
9880/**
9881 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
9882 *
9883 * This will update the rSP.
9884 *
9885 * @returns Strict VBox status code.
9886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9887 * @param pvMem The pointer returned by
9888 * iemMemStackPushBeginSpecial().
9889 * @param uNewRsp The new RSP value returned by
9890 * iemMemStackPushBeginSpecial().
9891 */
9892IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
9893{
9894 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
9895 if (rcStrict == VINF_SUCCESS)
9896 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9897 return rcStrict;
9898}
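
/*
 * Usage sketch (an assumption, not lifted from a caller): the begin/commit
 * pair maps the whole area to be pushed, lets the caller fill it in, and only
 * then commits both the memory and the new RSP. uEip, uCs and uEfl are
 * hypothetical values for the example.
 */
#if 0
    uint64_t  uNewRsp;
    uint32_t *pau32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                        (void **)&pau32Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        pau32Frame[0] = uEip;   /* lowest address = last value conceptually pushed */
        pau32Frame[1] = uCs;
        pau32Frame[2] = uEfl;
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
    }
    return rcStrict;
#endif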
9899
9900
9901/**
9902 * Begin a special stack pop (used by iret, retf and such).
9903 *
9904 * This will raise \#SS or \#PF if appropriate.
9905 *
9906 * @returns Strict VBox status code.
9907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9908 * @param cbMem The number of bytes to pop off the stack.
9909 * @param ppvMem Where to return the pointer to the stack memory.
9910 * @param puNewRsp Where to return the new RSP value. This must be
9911 * passed unchanged to
9912 * iemMemStackPopCommitSpecial() or applied
9913 * manually if iemMemStackPopDoneSpecial() is used.
9914 */
9915IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9916{
9917 Assert(cbMem < UINT8_MAX);
9918 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9919 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9920 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9921}
9922
9923
9924/**
9925 * Continue a special stack pop (used by iret and retf).
9926 *
9927 * This will raise \#SS or \#PF if appropriate.
9928 *
9929 * @returns Strict VBox status code.
9930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9931 * @param cbMem The number of bytes to pop off the stack.
9932 * @param ppvMem Where to return the pointer to the stack memory.
9933 * @param puNewRsp Where to return the new RSP value. This must be
9934 * passed unchanged to
9935 * iemMemStackPopCommitSpecial() or applied
9936 * manually if iemMemStackPopDoneSpecial() is used.
9937 */
9938IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9939{
9940 Assert(cbMem < UINT8_MAX);
9941 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9942 RTUINT64U NewRsp;
9943 NewRsp.u = *puNewRsp;
9944 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9945 *puNewRsp = NewRsp.u;
9946 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9947}
9948
9949
9950/**
9951 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
9952 *
9953 * This will update the rSP.
9954 *
9955 * @returns Strict VBox status code.
9956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9957 * @param pvMem The pointer returned by
9958 * iemMemStackPopBeginSpecial().
9959 * @param uNewRsp The new RSP value returned by
9960 * iemMemStackPopBeginSpecial().
9961 */
9962IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PVMCPU pVCpu, void const *pvMem, uint64_t uNewRsp)
9963{
9964 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9965 if (rcStrict == VINF_SUCCESS)
9966 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9967 return rcStrict;
9968}
9969
9970
9971/**
9972 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
9973 * iemMemStackPopContinueSpecial).
9974 *
9975 * The caller will manually commit the rSP.
9976 *
9977 * @returns Strict VBox status code.
9978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9979 * @param pvMem The pointer returned by
9980 * iemMemStackPopBeginSpecial() or
9981 * iemMemStackPopContinueSpecial().
9982 */
9983IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
9984{
9985 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9986}
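
/*
 * Usage sketch (an assumption, not lifted from a caller): an iret-style
 * sequence reads the frame, releases the mapping with the "done" variant and
 * only commits RSP manually once the popped values have been validated. The
 * frame layout and locals are hypothetical.
 */
#if 0
    uint64_t        uNewRsp;
    uint32_t const *pau32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                       (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const uEip = pau32Frame[0];    /* hypothetical frame layout */
        uint32_t const uCs  = pau32Frame[1];
        uint32_t const uEfl = pau32Frame[2];
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
        if (rcStrict == VINF_SUCCESS)
            IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* ...after validating uCs, uEfl, etc. */
    }
    return rcStrict;
#endif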
9987
9988
9989/**
9990 * Fetches a system table byte.
9991 *
9992 * @returns Strict VBox status code.
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param pbDst Where to return the byte.
9995 * @param iSegReg The index of the segment register to use for
9996 * this access. The base and limits are checked.
9997 * @param GCPtrMem The address of the guest memory.
9998 */
9999IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10000{
10001 /* The lazy approach for now... */
10002 uint8_t const *pbSrc;
10003 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10004 if (rc == VINF_SUCCESS)
10005 {
10006 *pbDst = *pbSrc;
10007 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10008 }
10009 return rc;
10010}
10011
10012
10013/**
10014 * Fetches a system table word.
10015 *
10016 * @returns Strict VBox status code.
10017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10018 * @param pu16Dst Where to return the word.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 */
10023IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10024{
10025 /* The lazy approach for now... */
10026 uint16_t const *pu16Src;
10027 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10028 if (rc == VINF_SUCCESS)
10029 {
10030 *pu16Dst = *pu16Src;
10031 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10032 }
10033 return rc;
10034}
10035
10036
10037/**
10038 * Fetches a system table dword.
10039 *
10040 * @returns Strict VBox status code.
10041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10042 * @param pu32Dst Where to return the dword.
10043 * @param iSegReg The index of the segment register to use for
10044 * this access. The base and limits are checked.
10045 * @param GCPtrMem The address of the guest memory.
10046 */
10047IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10048{
10049 /* The lazy approach for now... */
10050 uint32_t const *pu32Src;
10051 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10052 if (rc == VINF_SUCCESS)
10053 {
10054 *pu32Dst = *pu32Src;
10055 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10056 }
10057 return rc;
10058}
10059
10060
10061/**
10062 * Fetches a system table qword.
10063 *
10064 * @returns Strict VBox status code.
10065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10066 * @param pu64Dst Where to return the qword.
10067 * @param iSegReg The index of the segment register to use for
10068 * this access. The base and limits are checked.
10069 * @param GCPtrMem The address of the guest memory.
10070 */
10071IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10072{
10073 /* The lazy approach for now... */
10074 uint64_t const *pu64Src;
10075 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10076 if (rc == VINF_SUCCESS)
10077 {
10078 *pu64Dst = *pu64Src;
10079 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10080 }
10081 return rc;
10082}
10083
10084
10085/**
10086 * Fetches a descriptor table entry with caller specified error code.
10087 *
10088 * @returns Strict VBox status code.
10089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10090 * @param pDesc Where to return the descriptor table entry.
10091 * @param uSel The selector whose table entry to fetch.
10092 * @param uXcpt The exception to raise on table lookup error.
10093 * @param uErrorCode The error code associated with the exception.
10094 */
10095IEM_STATIC VBOXSTRICTRC
10096iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10097{
10098 AssertPtr(pDesc);
10099 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10100
10101 /** @todo did the 286 require all 8 bytes to be accessible? */
10102 /*
10103 * Get the selector table base and check bounds.
10104 */
10105 RTGCPTR GCPtrBase;
10106 if (uSel & X86_SEL_LDT)
10107 {
10108 if ( !pCtx->ldtr.Attr.n.u1Present
10109 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10110 {
10111 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10112 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10113 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10114 uErrorCode, 0);
10115 }
10116
10117 Assert(pCtx->ldtr.Attr.n.u1Present);
10118 GCPtrBase = pCtx->ldtr.u64Base;
10119 }
10120 else
10121 {
10122 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10123 {
10124 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10125 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10126 uErrorCode, 0);
10127 }
10128 GCPtrBase = pCtx->gdtr.pGdt;
10129 }
10130
10131 /*
10132 * Read the legacy descriptor and maybe the long mode extensions if
10133 * required.
10134 */
10135 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10136 if (rcStrict == VINF_SUCCESS)
10137 {
10138 if ( !IEM_IS_LONG_MODE(pVCpu)
10139 || pDesc->Legacy.Gen.u1DescType)
10140 pDesc->Long.au64[1] = 0;
10141 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10142 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10143 else
10144 {
10145 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10146 /** @todo is this the right exception? */
10147 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10148 }
10149 }
10150 return rcStrict;
10151}
10152
10153
10154/**
10155 * Fetches a descriptor table entry.
10156 *
10157 * @returns Strict VBox status code.
10158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10159 * @param pDesc Where to return the descriptor table entry.
10160 * @param uSel The selector whose table entry to fetch.
10161 * @param uXcpt The exception to raise on table lookup error.
10162 */
10163IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10164{
10165 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10166}
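
/*
 * Worked example (the selector value is made up for illustration): 0x002b
 * decomposes into RPL=3 (bits 1:0), TI=1 (bit 2, i.e. LDT) and index 5, so the
 * code above reads the 8-byte descriptor at ldtr.u64Base + 0x28
 * (0x002b & X86_SEL_MASK).
 */
#if 0
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, 0x002b, X86_XCPT_GP);
#endif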
10167
10168
10169/**
10170 * Fakes a long mode stack selector for SS = 0.
10171 *
10172 * @param pDescSs Where to return the fake stack descriptor.
10173 * @param uDpl The DPL we want.
10174 */
10175IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10176{
10177 pDescSs->Long.au64[0] = 0;
10178 pDescSs->Long.au64[1] = 0;
10179 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10180 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10181 pDescSs->Long.Gen.u2Dpl = uDpl;
10182 pDescSs->Long.Gen.u1Present = 1;
10183 pDescSs->Long.Gen.u1Long = 1;
10184}
10185
10186
10187/**
10188 * Marks the selector descriptor as accessed (only non-system descriptors).
10189 *
10190 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10191 * will therefore skip the limit checks.
10192 *
10193 * @returns Strict VBox status code.
10194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10195 * @param uSel The selector.
10196 */
10197IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10198{
10199 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10200
10201 /*
10202 * Get the selector table base and calculate the entry address.
10203 */
10204 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10205 ? pCtx->ldtr.u64Base
10206 : pCtx->gdtr.pGdt;
10207 GCPtr += uSel & X86_SEL_MASK;
10208
10209 /*
10210 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10211 * ugly stuff to avoid this. This also makes sure the access is atomic and
10212 * more or less removes any question about 8-bit vs 32-bit accesses; see the worked example below.
10213 */
10214 VBOXSTRICTRC rcStrict;
10215 uint32_t volatile *pu32;
10216 if ((GCPtr & 3) == 0)
10217 {
10218 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10219 GCPtr += 2 + 2;
10220 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10221 if (rcStrict != VINF_SUCCESS)
10222 return rcStrict;
10223 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10224 }
10225 else
10226 {
10227 /* The misaligned GDT/LDT case, map the whole thing. */
10228 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10229 if (rcStrict != VINF_SUCCESS)
10230 return rcStrict;
10231 switch ((uintptr_t)pu32 & 3)
10232 {
10233 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10234 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10235 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10236 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10237 }
10238 }
10239
10240 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10241}
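
/*
 * Worked example of the bit arithmetic above: the accessed bit is bit 0 of the
 * descriptor's type byte (byte 5), i.e. bit 40 of the 8-byte entry. In the
 * aligned case bytes 4..7 are mapped, so the bit to set is 40 - 32 = 8. In the
 * misaligned case with ((uintptr_t)pu32 & 3) == 1, stepping 3 bytes forward
 * skips descriptor bits 0..23 and lands on a 4-byte aligned address, leaving
 * 40 - 24 = 16 as the bit to set; the other cases follow the same pattern.
 */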
10242
10243/** @} */
10244
10245
10246/*
10247 * Include the C/C++ implementation of instruction.
10248 */
10249#include "IEMAllCImpl.cpp.h"
10250
10251
10252
10253/** @name "Microcode" macros.
10254 *
10255 * The idea is that we should be able to use the same code to interpret
10256 * instructions as well as to turn them into recompiled code. Thus this obfuscation.
10257 *
10258 * @{
10259 */
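
/*
 * Sketch (an assumption, not an excerpt from the instruction tables): a
 * register-to-register move written with the macros defined below. When
 * interpreted it expands to plain C, but because every step is an explicit
 * macro the same source could also be fed to a recompiler.
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xCX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#endif
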
10260#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10261#define IEM_MC_END() }
10262#define IEM_MC_PAUSE() do {} while (0)
10263#define IEM_MC_CONTINUE() do {} while (0)
10264
10265/** Internal macro. */
10266#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10267 do \
10268 { \
10269 VBOXSTRICTRC rcStrict2 = a_Expr; \
10270 if (rcStrict2 != VINF_SUCCESS) \
10271 return rcStrict2; \
10272 } while (0)
10273
10274
10275#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10276#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10277#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10278#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10279#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10280#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10281#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10282#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10283#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10284 do { \
10285 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10286 return iemRaiseDeviceNotAvailable(pVCpu); \
10287 } while (0)
10288#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10289 do { \
10290 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10291 return iemRaiseMathFault(pVCpu); \
10292 } while (0)
10293#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10294 do { \
10295 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10296 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10297 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10298 return iemRaiseUndefinedOpcode(pVCpu); \
10299 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10300 return iemRaiseDeviceNotAvailable(pVCpu); \
10301 } while (0)
10302#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10303 do { \
10304 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10305 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10306 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10307 return iemRaiseUndefinedOpcode(pVCpu); \
10308 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10309 return iemRaiseDeviceNotAvailable(pVCpu); \
10310 } while (0)
10311#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10312 do { \
10313 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10314 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10315 return iemRaiseUndefinedOpcode(pVCpu); \
10316 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10317 return iemRaiseDeviceNotAvailable(pVCpu); \
10318 } while (0)
10319#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10320 do { \
10321 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10322 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10323 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10324 return iemRaiseUndefinedOpcode(pVCpu); \
10325 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10326 return iemRaiseDeviceNotAvailable(pVCpu); \
10327 } while (0)
10328#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10329 do { \
10330 if (pVCpu->iem.s.uCpl != 0) \
10331 return iemRaiseGeneralProtectionFault0(pVCpu); \
10332 } while (0)
10333
10334
10335#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10336#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10337#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10338#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10339#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10340#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10341#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10342 uint32_t a_Name; \
10343 uint32_t *a_pName = &a_Name
10344#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10345 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10346
10347#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10348#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10349
10350#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10351#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10352#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10353#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10354#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10355#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10356#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10357#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10358#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10359#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10360#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10361#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10362#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10363#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10364#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10365#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10366#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10367#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10368#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10369#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10370#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10371#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10372#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10373#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10374#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10375#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10376#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10377#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10378#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10379/** @note Not for IOPL or IF testing or modification. */
10380#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10381#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10382#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10383#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10384
10385#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10386#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10387#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10388#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10389#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10390#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10391#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10392#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10393#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10394#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10395#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10396 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10397
10398#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10399#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10400/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10401 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10402#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10403#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10404/** @note Not for IOPL or IF testing or modification. */
10405#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10406
10407#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10408#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10409#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10410 do { \
10411 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10412 *pu32Reg += (a_u32Value); \
10413 pu32Reg[1] = 0; /* implicitly clear the high half of the register. */ \
10414 } while (0)
10415#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10416
10417#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10418#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10419#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10420 do { \
10421 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10422 *pu32Reg -= (a_u32Value); \
10423 pu32Reg[1] = 0; /* implicitly clear the high half of the register. */ \
10424 } while (0)
10425#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10426#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= (a_u16Const); } while (0)
10427
10428#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10429#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10430#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10431#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10432#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10433#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10434#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10435
10436#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10437#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10438#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10439#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10440
10441#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10442#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10443#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10444
10445#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10446#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10447#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10448
10449#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10450#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10451#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10452
10453#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10454#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10455#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10456
10457#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10458
10459#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10460
10461#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10462#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10463#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10464 do { \
10465 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10466 *pu32Reg &= (a_u32Value); \
10467 pu32Reg[1] = 0; /* implicitly clear the high half of the register. */ \
10468 } while (0)
10469#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10470
10471#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10472#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10473#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10474 do { \
10475 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10476 *pu32Reg |= (a_u32Value); \
10477 pu32Reg[1] = 0; /* implicitly clear the high half of the register. */ \
10478 } while (0)
10479#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10480
10481
10482/** @note Not for IOPL or IF modification. */
10483#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10484/** @note Not for IOPL or IF modification. */
10485#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10486/** @note Not for IOPL or IF modification. */
10487#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10488
10489#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10490
10491
10492#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10493 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10494#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10495 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10496#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10497 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10498#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10499 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10500#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10501 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10502#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10503 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10504#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10505 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10506
10507#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10508 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10509#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10510 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10511#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10512 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10513#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10514 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10515#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10516 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10517#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10518 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10519 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10520 } while (0)
10521#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10522 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10523 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10524 } while (0)
10525#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10526 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10527#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10528 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10529#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10530 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10531#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10532 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10533 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10534
10535#ifndef IEM_WITH_SETJMP
10536# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10537 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10538# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10539 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10540# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10541 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10542#else
10543# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10544 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10545# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10546 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10547# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10548 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10549#endif
10550
10551#ifndef IEM_WITH_SETJMP
10552# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10554# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10555 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10556# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10557 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10558#else
10559# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10560 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10561# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10562 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10563# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10564 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10565#endif
10566
10567#ifndef IEM_WITH_SETJMP
10568# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10569 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10570# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10572# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10574#else
10575# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10576 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10577# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10578 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10579# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10580 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10581#endif
10582
10583#ifdef SOME_UNUSED_FUNCTION
10584# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10585 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10586#endif
10587
10588#ifndef IEM_WITH_SETJMP
10589# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10590 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10591# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10593# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10595# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10597#else
10598# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10599 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10600# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10601 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10602# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10603 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10604# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10605 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10606#endif
10607
10608#ifndef IEM_WITH_SETJMP
10609# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10610 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10611# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10613# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10614 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10615#else
10616# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10617 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10618# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10619 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10620# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10621 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10622#endif
10623
10624#ifndef IEM_WITH_SETJMP
10625# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10626 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10627# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10629#else
10630# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10631 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10632# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10633 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10634#endif
10635
10636
10637
10638#ifndef IEM_WITH_SETJMP
10639# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10640 do { \
10641 uint8_t u8Tmp; \
10642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10643 (a_u16Dst) = u8Tmp; \
10644 } while (0)
10645# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10646 do { \
10647 uint8_t u8Tmp; \
10648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10649 (a_u32Dst) = u8Tmp; \
10650 } while (0)
10651# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10652 do { \
10653 uint8_t u8Tmp; \
10654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10655 (a_u64Dst) = u8Tmp; \
10656 } while (0)
10657# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10658 do { \
10659 uint16_t u16Tmp; \
10660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10661 (a_u32Dst) = u16Tmp; \
10662 } while (0)
10663# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10664 do { \
10665 uint16_t u16Tmp; \
10666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10667 (a_u64Dst) = u16Tmp; \
10668 } while (0)
10669# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10670 do { \
10671 uint32_t u32Tmp; \
10672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10673 (a_u64Dst) = u32Tmp; \
10674 } while (0)
10675#else /* IEM_WITH_SETJMP */
10676# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10677 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10678# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10679 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10680# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10681 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10682# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10683 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10684# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10685 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10686# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10687 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10688#endif /* IEM_WITH_SETJMP */
10689
10690#ifndef IEM_WITH_SETJMP
10691# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10692 do { \
10693 uint8_t u8Tmp; \
10694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10695 (a_u16Dst) = (int8_t)u8Tmp; \
10696 } while (0)
10697# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10698 do { \
10699 uint8_t u8Tmp; \
10700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10701 (a_u32Dst) = (int8_t)u8Tmp; \
10702 } while (0)
10703# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10704 do { \
10705 uint8_t u8Tmp; \
10706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10707 (a_u64Dst) = (int8_t)u8Tmp; \
10708 } while (0)
10709# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10710 do { \
10711 uint16_t u16Tmp; \
10712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10713 (a_u32Dst) = (int16_t)u16Tmp; \
10714 } while (0)
10715# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10716 do { \
10717 uint16_t u16Tmp; \
10718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10719 (a_u64Dst) = (int16_t)u16Tmp; \
10720 } while (0)
10721# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10722 do { \
10723 uint32_t u32Tmp; \
10724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10725 (a_u64Dst) = (int32_t)u32Tmp; \
10726 } while (0)
10727#else /* IEM_WITH_SETJMP */
10728# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10729 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10730# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10731 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10732# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10733 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10734# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10735 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10736# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10737 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10738# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10739 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10740#endif /* IEM_WITH_SETJMP */
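/*
 * Usage sketch (illustrative only, not taken from the opcode tables): how an
 * instruction body built from these microcode macros might consume one of the
 * sign-extending fetches, e.g. a movsx-style 16-bit memory read into a 32-bit
 * register.  Assumes the IEM_MC_BEGIN / IEM_MC_LOCAL / IEM_MC_STORE_GREG_U32 /
 * IEM_MC_ADVANCE_RIP / IEM_MC_END macros defined earlier in this file; bRm and
 * iGReg are hypothetical locals of the decoder function.
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U32(iGReg, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * The same body works in both build modes: without IEM_WITH_SETJMP each fetch
 * expands to an IEM_MC_RETURN_ON_FAILURE'd call, with it the *Jmp worker
 * longjmps on failure instead.
 */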
10741
10742#ifndef IEM_WITH_SETJMP
10743# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10744 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10745# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10746 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10747# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10748 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10749# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10750 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10751#else
10752# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10753 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10754# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10755 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10756# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10757 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10758# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10759 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10760#endif
10761
10762#ifndef IEM_WITH_SETJMP
10763# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10764 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10765# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10766 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10767# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10768 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10769# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10770 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10771#else
10772# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10773 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10774# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10775 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10776# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10777 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10778# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10779 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10780#endif
10781
10782#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10783#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10784#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10785#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10786#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10787#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10788#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10789 do { \
10790 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10791 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10792 } while (0)
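/*
 * For reference, the constants above are the standard IEEE-754 negative QNaN
 * encodings: sign bit set, all-ones exponent, most significant fraction bit set.
 *
 *     R32: 1 | 1111'1111 | 100...0                       = 0xFFC00000
 *     R64: 1 | 111'1111'1111 | 100...0                   = 0xFFF8000000000000
 *     R80: sign + exponent word (au16[4])                = 0xFFFF
 *          explicit integer bit + fraction MSB (au64[0]) = 0xC000000000000000
 */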
10793
10794#ifndef IEM_WITH_SETJMP
10795# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10796 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10797# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10798 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10799#else
10800# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10801 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10802# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10803 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10804#endif
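/*
 * Usage sketch (illustrative only): a movaps/movdqa-style aligned 128-bit
 * store.  IEM_MC_FETCH_XREG_U128 and the IEM_MC_BEGIN/LOCAL/ADVANCE_RIP/END
 * macros are assumed from earlier in this file; bRm and iXRegSrc are
 * hypothetical locals of the decoder function.
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint128_t, u128Tmp);
 *     IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *     IEM_MC_FETCH_XREG_U128(u128Tmp, iXRegSrc);
 *     IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u128Tmp);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * The _ALIGN_SSE variant goes through the SSE alignment checking worker, while
 * the plain U128 variant does not.
 */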
10805
10806
10807#define IEM_MC_PUSH_U16(a_u16Value) \
10808 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10809#define IEM_MC_PUSH_U32(a_u32Value) \
10810 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10811#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10812 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10813#define IEM_MC_PUSH_U64(a_u64Value) \
10814 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10815
10816#define IEM_MC_POP_U16(a_pu16Value) \
10817 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10818#define IEM_MC_POP_U32(a_pu32Value) \
10819 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10820#define IEM_MC_POP_U64(a_pu64Value) \
10821 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10822
10823/** Maps guest memory for direct or bounce buffered access.
10824 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10825 * @remarks May return.
10826 */
10827#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10828 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10829
10830/** Maps guest memory for direct or bounce buffered access.
10831 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10832 * @remarks May return.
10833 */
10834#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10835 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10836
10837/** Commits the memory and unmaps the guest memory.
10838 * @remarks May return.
10839 */
10840#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10841 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10842
10843/** Commits the memory and unmaps the guest memory unless the FPU status word
10844 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
10845 * that would prevent the FPU store from taking place.
10846 *
10847 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10848 * store, while \#P will not.
10849 *
10850 * @remarks May in theory return - for now.
10851 */
10852#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10853 do { \
10854 if ( !(a_u16FSW & X86_FSW_ES) \
10855 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10856 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10857 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10858 } while (0)
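/*
 * Rough numeric illustration of the check above: the FSW exception flags and
 * the FCW mask bits share bit positions 0..5, so
 * (a_u16FSW & (UE|OE|IE)) & ~(FCW & X86_FCW_MASK_ALL) is the set of relevant
 * pending exceptions that are not masked.  E.g. with FCW=0x037F (everything
 * masked) the commit always happens, whereas FCW=0x037E (IM cleared) together
 * with an FSW that has ES and IE set makes the store be skipped.
 */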
10859
10860/** Calculate efficient address from R/M. */
10861#ifndef IEM_WITH_SETJMP
10862# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10863 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10864#else
10865# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10866 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10867#endif
10868
10869#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10870#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10871#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10872#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10873#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10874#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10875#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10876
10877/**
10878 * Defers the rest of the instruction emulation to a C implementation routine
10879 * and returns, only taking the standard parameters.
10880 *
10881 * @param a_pfnCImpl The pointer to the C routine.
10882 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10883 */
10884#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10885
10886/**
10887 * Defers the rest of instruction emulation to a C implementation routine and
10888 * returns, taking one argument in addition to the standard ones.
10889 *
10890 * @param a_pfnCImpl The pointer to the C routine.
10891 * @param a0 The argument.
10892 */
10893#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10894
10895/**
10896 * Defers the rest of the instruction emulation to a C implementation routine
10897 * and returns, taking two arguments in addition to the standard ones.
10898 *
10899 * @param a_pfnCImpl The pointer to the C routine.
10900 * @param a0 The first extra argument.
10901 * @param a1 The second extra argument.
10902 */
10903#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10904
10905/**
10906 * Defers the rest of the instruction emulation to a C implementation routine
10907 * and returns, taking three arguments in addition to the standard ones.
10908 *
10909 * @param a_pfnCImpl The pointer to the C routine.
10910 * @param a0 The first extra argument.
10911 * @param a1 The second extra argument.
10912 * @param a2 The third extra argument.
10913 */
10914#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
10915
10916/**
10917 * Defers the rest of the instruction emulation to a C implementation routine
10918 * and returns, taking four arguments in addition to the standard ones.
10919 *
10920 * @param a_pfnCImpl The pointer to the C routine.
10921 * @param a0 The first extra argument.
10922 * @param a1 The second extra argument.
10923 * @param a2 The third extra argument.
10924 * @param a3 The fourth extra argument.
10925 */
10926#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
10927
10928/**
10929 * Defers the rest of the instruction emulation to a C implementation routine
10930 * and returns, taking five arguments in addition to the standard ones.
10931 *
10932 * @param a_pfnCImpl The pointer to the C routine.
10933 * @param a0 The first extra argument.
10934 * @param a1 The second extra argument.
10935 * @param a2 The third extra argument.
10936 * @param a3 The fourth extra argument.
10937 * @param a4 The fifth extra argument.
10938 */
10939#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
10940
10941/**
10942 * Defers the entire instruction emulation to a C implementation routine and
10943 * returns, only taking the standard parameters.
10944 *
10945 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10946 *
10947 * @param a_pfnCImpl The pointer to the C routine.
10948 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10949 */
10950#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10951
10952/**
10953 * Defers the entire instruction emulation to a C implementation routine and
10954 * returns, taking one argument in addition to the standard ones.
10955 *
10956 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10957 *
10958 * @param a_pfnCImpl The pointer to the C routine.
10959 * @param a0 The argument.
10960 */
10961#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10962
10963/**
10964 * Defers the entire instruction emulation to a C implementation routine and
10965 * returns, taking two arguments in addition to the standard ones.
10966 *
10967 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10968 *
10969 * @param a_pfnCImpl The pointer to the C routine.
10970 * @param a0 The first extra argument.
10971 * @param a1 The second extra argument.
10972 */
10973#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10974
10975/**
10976 * Defers the entire instruction emulation to a C implementation routine and
10977 * returns, taking three arguments in addition to the standard ones.
10978 *
10979 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10980 *
10981 * @param a_pfnCImpl The pointer to the C routine.
10982 * @param a0 The first extra argument.
10983 * @param a1 The second extra argument.
10984 * @param a2 The third extra argument.
10985 */
10986#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
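/*
 * Usage sketch (hypothetical worker name): the IEM_MC_CALL_CIMPL_N macros are
 * used inside an IEM_MC_BEGIN/IEM_MC_END block and return from the instruction
 * body themselves, whereas the DEFER variants stand alone in the opcode
 * function:
 *
 *     return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_SomeWorker, uImmediate);
 *
 * Both forms pass pVCpu and the decoded instruction length as the standard
 * leading parameters.
 */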
10987
10988/**
10989 * Calls a FPU assembly implementation taking one visible argument.
10990 *
10991 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10992 * @param a0 The first extra argument.
10993 */
10994#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
10995 do { \
10996 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
10997 } while (0)
10998
10999/**
11000 * Calls a FPU assembly implementation taking two visible arguments.
11001 *
11002 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11003 * @param a0 The first extra argument.
11004 * @param a1 The second extra argument.
11005 */
11006#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11007 do { \
11008 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11009 } while (0)
11010
11011/**
11012 * Calls a FPU assembly implementation taking three visible arguments.
11013 *
11014 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11015 * @param a0 The first extra argument.
11016 * @param a1 The second extra argument.
11017 * @param a2 The third extra argument.
11018 */
11019#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11020 do { \
11021 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11022 } while (0)
11023
11024#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11025 do { \
11026 (a_FpuData).FSW = (a_FSW); \
11027 (a_FpuData).r80Result = *(a_pr80Value); \
11028 } while (0)
11029
11030/** Pushes FPU result onto the stack. */
11031#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11032 iemFpuPushResult(pVCpu, &a_FpuData)
11033/** Pushes FPU result onto the stack and sets the FPUDP. */
11034#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11035 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11036
11037/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11038#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11039 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11040
11041/** Stores FPU result in a stack register. */
11042#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11043 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11044/** Stores FPU result in a stack register and pops the stack. */
11045#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11046 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11047/** Stores FPU result in a stack register and sets the FPUDP. */
11048#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11049 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11050/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11051 * stack. */
11052#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11053 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11054
11055/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11056#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11057 iemFpuUpdateOpcodeAndIp(pVCpu)
11058/** Free a stack register (for FFREE and FFREEP). */
11059#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11060 iemFpuStackFree(pVCpu, a_iStReg)
11061/** Increment the FPU stack pointer. */
11062#define IEM_MC_FPU_STACK_INC_TOP() \
11063 iemFpuStackIncTop(pVCpu)
11064/** Decrement the FPU stack pointer. */
11065#define IEM_MC_FPU_STACK_DEC_TOP() \
11066 iemFpuStackDecTop(pVCpu)
11067
11068/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11069#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11070 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11071/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11072#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11073 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11074/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11075#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11076 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11077/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11078#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11079 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11080/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11081 * stack. */
11082#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11083 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11084/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11085#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11086    iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11087
11088/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11089#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11090 iemFpuStackUnderflow(pVCpu, a_iStDst)
11091/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11092 * stack. */
11093#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11094 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11095/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11096 * FPUDS. */
11097#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11098 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11099/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11100 * FPUDS. Pops stack. */
11101#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11102 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11103/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11104 * stack twice. */
11105#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11106 iemFpuStackUnderflowThenPopPop(pVCpu)
11107/** Raises a FPU stack underflow exception for an instruction pushing a result
11108 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11109#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11110 iemFpuStackPushUnderflow(pVCpu)
11111/** Raises a FPU stack underflow exception for an instruction pushing a result
11112 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11113#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11114 iemFpuStackPushUnderflowTwo(pVCpu)
11115
11116/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11117 * FPUIP, FPUCS and FOP. */
11118#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11119 iemFpuStackPushOverflow(pVCpu)
11120/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11121 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11122#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11123 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11124/** Prepares for using the FPU state.
11125 * Ensures that we can use the host FPU in the current context (RC+R0).
11126 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11127#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11128/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11129#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11130/** Actualizes the guest FPU state so it can be accessed and modified. */
11131#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11132
11133/** Prepares for using the SSE state.
11134 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11135 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11136#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11137/** Actualizes the guest XMM0..15 register state for read-only access. */
11138#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11139/** Actualizes the guest XMM0..15 register state for read-write access. */
11140#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11141
11142/**
11143 * Calls a MMX assembly implementation taking two visible arguments.
11144 *
11145 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11146 * @param a0 The first extra argument.
11147 * @param a1 The second extra argument.
11148 */
11149#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11150 do { \
11151 IEM_MC_PREPARE_FPU_USAGE(); \
11152 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11153 } while (0)
11154
11155/**
11156 * Calls a MMX assembly implementation taking three visible arguments.
11157 *
11158 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11159 * @param a0 The first extra argument.
11160 * @param a1 The second extra argument.
11161 * @param a2 The third extra argument.
11162 */
11163#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11164 do { \
11165 IEM_MC_PREPARE_FPU_USAGE(); \
11166 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11167 } while (0)
11168
11169
11170/**
11171 * Calls a SSE assembly implementation taking two visible arguments.
11172 *
11173 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
11174 * @param a0 The first extra argument.
11175 * @param a1 The second extra argument.
11176 */
11177#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11178 do { \
11179 IEM_MC_PREPARE_SSE_USAGE(); \
11180 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11181 } while (0)
11182
11183/**
11184 * Calls a SSE assembly implementation taking three visible arguments.
11185 *
11186 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
11187 * @param a0 The first extra argument.
11188 * @param a1 The second extra argument.
11189 * @param a2 The third extra argument.
11190 */
11191#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11192 do { \
11193 IEM_MC_PREPARE_SSE_USAGE(); \
11194 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11195 } while (0)
11196
11197/** @note Not for IOPL or IF testing. */
11198#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11199/** @note Not for IOPL or IF testing. */
11200#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11201/** @note Not for IOPL or IF testing. */
11202#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11203/** @note Not for IOPL or IF testing. */
11204#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11205/** @note Not for IOPL or IF testing. */
11206#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11207 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11208 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11209/** @note Not for IOPL or IF testing. */
11210#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11211 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11212 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11213/** @note Not for IOPL or IF testing. */
11214#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11215 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11216 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11217 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11218/** @note Not for IOPL or IF testing. */
11219#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11220 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11221 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11222 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11223#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11224#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11225#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11226/** @note Not for IOPL or IF testing. */
11227#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11228 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11229 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11230/** @note Not for IOPL or IF testing. */
11231#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11232 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11233 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11234/** @note Not for IOPL or IF testing. */
11235#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11236 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11237 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11238/** @note Not for IOPL or IF testing. */
11239#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11240 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11241 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11242/** @note Not for IOPL or IF testing. */
11243#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11244 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11245 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11246/** @note Not for IOPL or IF testing. */
11247#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11248 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11249 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11250#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11251#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11252
11253#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11254 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11255#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11256 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11257#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11258 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11259#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11260 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11261#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11262 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11263#define IEM_MC_IF_FCW_IM() \
11264 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11265
11266#define IEM_MC_ELSE() } else {
11267#define IEM_MC_ENDIF() } do {} while (0)
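/*
 * Usage sketch (hypothetical assembly worker name; the IEM_MC_LOCAL
 * declarations of FpuRes and pr80Value are omitted): the IEM_MC_IF_* /
 * IEM_MC_ELSE / IEM_MC_ENDIF macros expand to plain if/else blocks, so a unary
 * FPU instruction body combining the register test, the worker call and the
 * result/underflow macros above looks roughly like this:
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *         IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_hypothetical_r80, &FpuRes, pr80Value);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 */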
11268
11269/** @} */
11270
11271
11272/** @name Opcode Debug Helpers.
11273 * @{
11274 */
11275#ifdef DEBUG
11276# define IEMOP_MNEMONIC(a_szMnemonic) \
11277 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11278 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions))
11279# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
11280 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11281 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions))
11282#else
11283# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
11284# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
11285#endif
11286
11287/** @} */
11288
11289
11290/** @name Opcode Helpers.
11291 * @{
11292 */
11293
11294#ifdef IN_RING3
11295# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11296 do { \
11297 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11298 else \
11299 { \
11300 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11301 return IEMOP_RAISE_INVALID_OPCODE(); \
11302 } \
11303 } while (0)
11304#else
11305# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11306 do { \
11307 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11308 else return IEMOP_RAISE_INVALID_OPCODE(); \
11309 } while (0)
11310#endif
11311
11312/** The instruction requires a 186 or later. */
11313#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11314# define IEMOP_HLP_MIN_186() do { } while (0)
11315#else
11316# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11317#endif
11318
11319/** The instruction requires a 286 or later. */
11320#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11321# define IEMOP_HLP_MIN_286() do { } while (0)
11322#else
11323# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11324#endif
11325
11326/** The instruction requires a 386 or later. */
11327#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11328# define IEMOP_HLP_MIN_386() do { } while (0)
11329#else
11330# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11331#endif
11332
11333/** The instruction requires a 386 or later if the given expression is true. */
11334#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11335# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11336#else
11337# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11338#endif
11339
11340/** The instruction requires a 486 or later. */
11341#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11342# define IEMOP_HLP_MIN_486() do { } while (0)
11343#else
11344# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11345#endif
11346
11347/** The instruction requires a Pentium (586) or later. */
11348#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
11349# define IEMOP_HLP_MIN_586() do { } while (0)
11350#else
11351# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
11352#endif
11353
11354/** The instruction requires a PentiumPro (686) or later. */
11355#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
11356# define IEMOP_HLP_MIN_686() do { } while (0)
11357#else
11358# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
11359#endif
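/*
 * Usage sketch (hypothetical placement): an opcode function for an instruction
 * introduced with a later CPU generation places the matching check right after
 * decoding the mnemonic, e.g.
 *
 *     IEMOP_MNEMONIC("bound Gv,Ma");
 *     IEMOP_HLP_MIN_186();
 *
 * When IEM_CFG_TARGET_CPU already guarantees the minimum the check compiles
 * away; otherwise it raises \#UD for older target CPUs (and in ring-3 it
 * additionally triggers DBGFSTOP first).
 */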
11360
11361
11362/** The instruction raises an \#UD in real and V8086 mode. */
11363#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11364 do \
11365 { \
11366 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11367 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11368 } while (0)
11369
11370/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11371 * 64-bit mode. */
11372#define IEMOP_HLP_NO_64BIT() \
11373 do \
11374 { \
11375 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11376 return IEMOP_RAISE_INVALID_OPCODE(); \
11377 } while (0)
11378
11379/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11380 * 64-bit mode. */
11381#define IEMOP_HLP_ONLY_64BIT() \
11382 do \
11383 { \
11384 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11385 return IEMOP_RAISE_INVALID_OPCODE(); \
11386 } while (0)
11387
11388/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
11389#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11390 do \
11391 { \
11392 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11393 iemRecalEffOpSize64Default(pVCpu); \
11394 } while (0)
11395
11396/** The instruction has 64-bit operand size if in 64-bit mode. */
11397#define IEMOP_HLP_64BIT_OP_SIZE() \
11398 do \
11399 { \
11400 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11401 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11402 } while (0)
11403
11404/** Only a REX prefix immediately preceding the first opcode byte takes
11405 * effect. This macro helps ensure this and logs bad guest code. */
11406#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11407 do \
11408 { \
11409 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11410 { \
11411 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11412 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11413 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11414 pVCpu->iem.s.uRexB = 0; \
11415 pVCpu->iem.s.uRexIndex = 0; \
11416 pVCpu->iem.s.uRexReg = 0; \
11417 iemRecalEffOpSize(pVCpu); \
11418 } \
11419 } while (0)
11420
11421/**
11422 * Done decoding.
11423 */
11424#define IEMOP_HLP_DONE_DECODING() \
11425 do \
11426 { \
11427 /*nothing for now, maybe later... */ \
11428 } while (0)
11429
11430/**
11431 * Done decoding, raise \#UD exception if lock prefix present.
11432 */
11433#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11434 do \
11435 { \
11436 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11437 { /* likely */ } \
11438 else \
11439 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11440 } while (0)
11441#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11442 do \
11443 { \
11444 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11445 { /* likely */ } \
11446 else \
11447 { \
11448 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11449 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11450 } \
11451 } while (0)
11452#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11453 do \
11454 { \
11455 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11456 { /* likely */ } \
11457 else \
11458 { \
11459 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11460 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11461 } \
11462 } while (0)
11463
11464/**
11465 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11466 * are present.
11467 */
11468#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11469 do \
11470 { \
11471 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11472 { /* likely */ } \
11473 else \
11474 return IEMOP_RAISE_INVALID_OPCODE(); \
11475 } while (0)
11476
11477
11478/**
11479 * Calculates the effective address of a ModR/M memory operand.
11480 *
11481 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11482 *
11483 * @return Strict VBox status code.
11484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11485 * @param bRm The ModRM byte.
11486 * @param cbImm The size of any immediate following the
11487 * effective address opcode bytes. Important for
11488 * RIP relative addressing.
11489 * @param pGCPtrEff Where to return the effective address.
11490 */
11491IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11492{
11493 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11494 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11495# define SET_SS_DEF() \
11496 do \
11497 { \
11498 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11499 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11500 } while (0)
11501
11502 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11503 {
11504/** @todo Check the effective address size crap! */
11505 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11506 {
11507 uint16_t u16EffAddr;
11508
11509 /* Handle the disp16 form with no registers first. */
11510 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11511 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11512 else
11513 {
11514                /* Get the displacement. */
11515 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11516 {
11517 case 0: u16EffAddr = 0; break;
11518 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11519 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11520 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11521 }
11522
11523 /* Add the base and index registers to the disp. */
11524 switch (bRm & X86_MODRM_RM_MASK)
11525 {
11526 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11527 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11528 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11529 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11530 case 4: u16EffAddr += pCtx->si; break;
11531 case 5: u16EffAddr += pCtx->di; break;
11532 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11533 case 7: u16EffAddr += pCtx->bx; break;
11534 }
11535 }
11536
11537 *pGCPtrEff = u16EffAddr;
11538 }
11539 else
11540 {
11541 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11542 uint32_t u32EffAddr;
11543
11544 /* Handle the disp32 form with no registers first. */
11545 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11546 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11547 else
11548 {
11549 /* Get the register (or SIB) value. */
11550 switch ((bRm & X86_MODRM_RM_MASK))
11551 {
11552 case 0: u32EffAddr = pCtx->eax; break;
11553 case 1: u32EffAddr = pCtx->ecx; break;
11554 case 2: u32EffAddr = pCtx->edx; break;
11555 case 3: u32EffAddr = pCtx->ebx; break;
11556 case 4: /* SIB */
11557 {
11558 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11559
11560 /* Get the index and scale it. */
11561 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11562 {
11563 case 0: u32EffAddr = pCtx->eax; break;
11564 case 1: u32EffAddr = pCtx->ecx; break;
11565 case 2: u32EffAddr = pCtx->edx; break;
11566 case 3: u32EffAddr = pCtx->ebx; break;
11567 case 4: u32EffAddr = 0; /*none */ break;
11568 case 5: u32EffAddr = pCtx->ebp; break;
11569 case 6: u32EffAddr = pCtx->esi; break;
11570 case 7: u32EffAddr = pCtx->edi; break;
11571 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11572 }
11573 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11574
11575 /* add base */
11576 switch (bSib & X86_SIB_BASE_MASK)
11577 {
11578 case 0: u32EffAddr += pCtx->eax; break;
11579 case 1: u32EffAddr += pCtx->ecx; break;
11580 case 2: u32EffAddr += pCtx->edx; break;
11581 case 3: u32EffAddr += pCtx->ebx; break;
11582 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11583 case 5:
11584 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11585 {
11586 u32EffAddr += pCtx->ebp;
11587 SET_SS_DEF();
11588 }
11589 else
11590 {
11591 uint32_t u32Disp;
11592 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11593 u32EffAddr += u32Disp;
11594 }
11595 break;
11596 case 6: u32EffAddr += pCtx->esi; break;
11597 case 7: u32EffAddr += pCtx->edi; break;
11598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11599 }
11600 break;
11601 }
11602 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11603 case 6: u32EffAddr = pCtx->esi; break;
11604 case 7: u32EffAddr = pCtx->edi; break;
11605 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11606 }
11607
11608 /* Get and add the displacement. */
11609 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11610 {
11611 case 0:
11612 break;
11613 case 1:
11614 {
11615 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11616 u32EffAddr += i8Disp;
11617 break;
11618 }
11619 case 2:
11620 {
11621 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11622 u32EffAddr += u32Disp;
11623 break;
11624 }
11625 default:
11626 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11627 }
11628
11629 }
11630 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11631 *pGCPtrEff = u32EffAddr;
11632 else
11633 {
11634 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11635 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11636 }
11637 }
11638 }
11639 else
11640 {
11641 uint64_t u64EffAddr;
11642
11643 /* Handle the rip+disp32 form with no registers first. */
11644 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11645 {
11646 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11647 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11648 }
11649 else
11650 {
11651 /* Get the register (or SIB) value. */
11652 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11653 {
11654 case 0: u64EffAddr = pCtx->rax; break;
11655 case 1: u64EffAddr = pCtx->rcx; break;
11656 case 2: u64EffAddr = pCtx->rdx; break;
11657 case 3: u64EffAddr = pCtx->rbx; break;
11658 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11659 case 6: u64EffAddr = pCtx->rsi; break;
11660 case 7: u64EffAddr = pCtx->rdi; break;
11661 case 8: u64EffAddr = pCtx->r8; break;
11662 case 9: u64EffAddr = pCtx->r9; break;
11663 case 10: u64EffAddr = pCtx->r10; break;
11664 case 11: u64EffAddr = pCtx->r11; break;
11665 case 13: u64EffAddr = pCtx->r13; break;
11666 case 14: u64EffAddr = pCtx->r14; break;
11667 case 15: u64EffAddr = pCtx->r15; break;
11668 /* SIB */
11669 case 4:
11670 case 12:
11671 {
11672 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11673
11674 /* Get the index and scale it. */
11675 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11676 {
11677 case 0: u64EffAddr = pCtx->rax; break;
11678 case 1: u64EffAddr = pCtx->rcx; break;
11679 case 2: u64EffAddr = pCtx->rdx; break;
11680 case 3: u64EffAddr = pCtx->rbx; break;
11681 case 4: u64EffAddr = 0; /*none */ break;
11682 case 5: u64EffAddr = pCtx->rbp; break;
11683 case 6: u64EffAddr = pCtx->rsi; break;
11684 case 7: u64EffAddr = pCtx->rdi; break;
11685 case 8: u64EffAddr = pCtx->r8; break;
11686 case 9: u64EffAddr = pCtx->r9; break;
11687 case 10: u64EffAddr = pCtx->r10; break;
11688 case 11: u64EffAddr = pCtx->r11; break;
11689 case 12: u64EffAddr = pCtx->r12; break;
11690 case 13: u64EffAddr = pCtx->r13; break;
11691 case 14: u64EffAddr = pCtx->r14; break;
11692 case 15: u64EffAddr = pCtx->r15; break;
11693 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11694 }
11695 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11696
11697 /* add base */
11698 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11699 {
11700 case 0: u64EffAddr += pCtx->rax; break;
11701 case 1: u64EffAddr += pCtx->rcx; break;
11702 case 2: u64EffAddr += pCtx->rdx; break;
11703 case 3: u64EffAddr += pCtx->rbx; break;
11704 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11705 case 6: u64EffAddr += pCtx->rsi; break;
11706 case 7: u64EffAddr += pCtx->rdi; break;
11707 case 8: u64EffAddr += pCtx->r8; break;
11708 case 9: u64EffAddr += pCtx->r9; break;
11709 case 10: u64EffAddr += pCtx->r10; break;
11710 case 11: u64EffAddr += pCtx->r11; break;
11711 case 12: u64EffAddr += pCtx->r12; break;
11712 case 14: u64EffAddr += pCtx->r14; break;
11713 case 15: u64EffAddr += pCtx->r15; break;
11714 /* complicated encodings */
11715 case 5:
11716 case 13:
11717 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11718 {
11719 if (!pVCpu->iem.s.uRexB)
11720 {
11721 u64EffAddr += pCtx->rbp;
11722 SET_SS_DEF();
11723 }
11724 else
11725 u64EffAddr += pCtx->r13;
11726 }
11727 else
11728 {
11729 uint32_t u32Disp;
11730 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11731 u64EffAddr += (int32_t)u32Disp;
11732 }
11733 break;
11734 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11735 }
11736 break;
11737 }
11738 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11739 }
11740
11741 /* Get and add the displacement. */
11742 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11743 {
11744 case 0:
11745 break;
11746 case 1:
11747 {
11748 int8_t i8Disp;
11749 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11750 u64EffAddr += i8Disp;
11751 break;
11752 }
11753 case 2:
11754 {
11755 uint32_t u32Disp;
11756 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11757 u64EffAddr += (int32_t)u32Disp;
11758 break;
11759 }
11760 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11761 }
11762
11763 }
11764
11765 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11766 *pGCPtrEff = u64EffAddr;
11767 else
11768 {
11769 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11770 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11771 }
11772 }
11773
11774 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11775 return VINF_SUCCESS;
11776}
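/*
 * Worked examples: with 16-bit addressing, bRm=0x46 (mod=1, rm=6) decodes to
 * [bp+disp8] with SS as the default segment, while bRm=0x06 (mod=0, rm=6) is
 * the plain [disp16] form.  In 64-bit mode, bRm=0x05 (mod=0, rm=5) is RIP
 * relative: the fetched disp32 is added to the address of the next
 * instruction, which is why the instruction bytes decoded so far plus cbImm
 * are added in above.
 */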
11777
11778
11779/**
11780 * Calculates the effective address of a ModR/M memory operand.
11781 *
11782 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11783 *
11784 * @return Strict VBox status code.
11785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11786 * @param bRm The ModRM byte.
11787 * @param cbImm The size of any immediate following the
11788 * effective address opcode bytes. Important for
11789 * RIP relative addressing.
11790 * @param pGCPtrEff Where to return the effective address.
11791 * @param   offRsp              Extra displacement added to ESP/RSP relative
11792 *                              addresses (SIB base register 4).
11792 */
11793IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11794{
11795    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11796 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11797# define SET_SS_DEF() \
11798 do \
11799 { \
11800 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11801 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11802 } while (0)
11803
11804 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11805 {
11806/** @todo Check the effective address size crap! */
11807 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11808 {
11809 uint16_t u16EffAddr;
11810
11811 /* Handle the disp16 form with no registers first. */
11812 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11813 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11814 else
11815 {
11816                /* Get the displacement. */
11817 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11818 {
11819 case 0: u16EffAddr = 0; break;
11820 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11821 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11822 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11823 }
11824
11825 /* Add the base and index registers to the disp. */
11826 switch (bRm & X86_MODRM_RM_MASK)
11827 {
11828 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11829 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11830 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11831 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11832 case 4: u16EffAddr += pCtx->si; break;
11833 case 5: u16EffAddr += pCtx->di; break;
11834 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11835 case 7: u16EffAddr += pCtx->bx; break;
11836 }
11837 }
11838
11839 *pGCPtrEff = u16EffAddr;
11840 }
11841 else
11842 {
11843 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11844 uint32_t u32EffAddr;
11845
11846 /* Handle the disp32 form with no registers first. */
11847 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11848 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11849 else
11850 {
11851 /* Get the register (or SIB) value. */
11852 switch ((bRm & X86_MODRM_RM_MASK))
11853 {
11854 case 0: u32EffAddr = pCtx->eax; break;
11855 case 1: u32EffAddr = pCtx->ecx; break;
11856 case 2: u32EffAddr = pCtx->edx; break;
11857 case 3: u32EffAddr = pCtx->ebx; break;
11858 case 4: /* SIB */
11859 {
11860 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11861
11862 /* Get the index and scale it. */
11863 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11864 {
11865 case 0: u32EffAddr = pCtx->eax; break;
11866 case 1: u32EffAddr = pCtx->ecx; break;
11867 case 2: u32EffAddr = pCtx->edx; break;
11868 case 3: u32EffAddr = pCtx->ebx; break;
11869 case 4: u32EffAddr = 0; /*none */ break;
11870 case 5: u32EffAddr = pCtx->ebp; break;
11871 case 6: u32EffAddr = pCtx->esi; break;
11872 case 7: u32EffAddr = pCtx->edi; break;
11873 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11874 }
11875 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11876
11877 /* add base */
11878 switch (bSib & X86_SIB_BASE_MASK)
11879 {
11880 case 0: u32EffAddr += pCtx->eax; break;
11881 case 1: u32EffAddr += pCtx->ecx; break;
11882 case 2: u32EffAddr += pCtx->edx; break;
11883 case 3: u32EffAddr += pCtx->ebx; break;
11884 case 4:
11885 u32EffAddr += pCtx->esp + offRsp;
11886 SET_SS_DEF();
11887 break;
11888 case 5:
11889 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11890 {
11891 u32EffAddr += pCtx->ebp;
11892 SET_SS_DEF();
11893 }
11894 else
11895 {
11896 uint32_t u32Disp;
11897 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11898 u32EffAddr += u32Disp;
11899 }
11900 break;
11901 case 6: u32EffAddr += pCtx->esi; break;
11902 case 7: u32EffAddr += pCtx->edi; break;
11903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11904 }
11905 break;
11906 }
11907 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11908 case 6: u32EffAddr = pCtx->esi; break;
11909 case 7: u32EffAddr = pCtx->edi; break;
11910 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11911 }
11912
11913 /* Get and add the displacement. */
11914 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11915 {
11916 case 0:
11917 break;
11918 case 1:
11919 {
11920 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11921 u32EffAddr += i8Disp;
11922 break;
11923 }
11924 case 2:
11925 {
11926 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11927 u32EffAddr += u32Disp;
11928 break;
11929 }
11930 default:
11931 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11932 }
11933
11934 }
11935 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11936 *pGCPtrEff = u32EffAddr;
11937 else
11938 {
11939 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11940 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11941 }
11942 }
11943 }
11944 else
11945 {
11946 uint64_t u64EffAddr;
11947
11948 /* Handle the rip+disp32 form with no registers first. */
11949 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11950 {
11951 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11952 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11953 }
11954 else
11955 {
11956 /* Get the register (or SIB) value. */
11957 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11958 {
11959 case 0: u64EffAddr = pCtx->rax; break;
11960 case 1: u64EffAddr = pCtx->rcx; break;
11961 case 2: u64EffAddr = pCtx->rdx; break;
11962 case 3: u64EffAddr = pCtx->rbx; break;
11963 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11964 case 6: u64EffAddr = pCtx->rsi; break;
11965 case 7: u64EffAddr = pCtx->rdi; break;
11966 case 8: u64EffAddr = pCtx->r8; break;
11967 case 9: u64EffAddr = pCtx->r9; break;
11968 case 10: u64EffAddr = pCtx->r10; break;
11969 case 11: u64EffAddr = pCtx->r11; break;
11970 case 13: u64EffAddr = pCtx->r13; break;
11971 case 14: u64EffAddr = pCtx->r14; break;
11972 case 15: u64EffAddr = pCtx->r15; break;
11973 /* SIB */
11974 case 4:
11975 case 12:
11976 {
11977 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11978
11979 /* Get the index and scale it. */
11980 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11981 {
11982 case 0: u64EffAddr = pCtx->rax; break;
11983 case 1: u64EffAddr = pCtx->rcx; break;
11984 case 2: u64EffAddr = pCtx->rdx; break;
11985 case 3: u64EffAddr = pCtx->rbx; break;
11986 case 4: u64EffAddr = 0; /*none */ break;
11987 case 5: u64EffAddr = pCtx->rbp; break;
11988 case 6: u64EffAddr = pCtx->rsi; break;
11989 case 7: u64EffAddr = pCtx->rdi; break;
11990 case 8: u64EffAddr = pCtx->r8; break;
11991 case 9: u64EffAddr = pCtx->r9; break;
11992 case 10: u64EffAddr = pCtx->r10; break;
11993 case 11: u64EffAddr = pCtx->r11; break;
11994 case 12: u64EffAddr = pCtx->r12; break;
11995 case 13: u64EffAddr = pCtx->r13; break;
11996 case 14: u64EffAddr = pCtx->r14; break;
11997 case 15: u64EffAddr = pCtx->r15; break;
11998 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11999 }
12000 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12001
12002 /* add base */
12003 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12004 {
12005 case 0: u64EffAddr += pCtx->rax; break;
12006 case 1: u64EffAddr += pCtx->rcx; break;
12007 case 2: u64EffAddr += pCtx->rdx; break;
12008 case 3: u64EffAddr += pCtx->rbx; break;
12009 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12010 case 6: u64EffAddr += pCtx->rsi; break;
12011 case 7: u64EffAddr += pCtx->rdi; break;
12012 case 8: u64EffAddr += pCtx->r8; break;
12013 case 9: u64EffAddr += pCtx->r9; break;
12014 case 10: u64EffAddr += pCtx->r10; break;
12015 case 11: u64EffAddr += pCtx->r11; break;
12016 case 12: u64EffAddr += pCtx->r12; break;
12017 case 14: u64EffAddr += pCtx->r14; break;
12018 case 15: u64EffAddr += pCtx->r15; break;
12019 /* complicated encodings */
12020 case 5:
12021 case 13:
12022 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12023 {
12024 if (!pVCpu->iem.s.uRexB)
12025 {
12026 u64EffAddr += pCtx->rbp;
12027 SET_SS_DEF();
12028 }
12029 else
12030 u64EffAddr += pCtx->r13;
12031 }
12032 else
12033 {
12034 uint32_t u32Disp;
12035 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12036 u64EffAddr += (int32_t)u32Disp;
12037 }
12038 break;
12039 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12040 }
12041 break;
12042 }
12043 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12044 }
12045
12046 /* Get and add the displacement. */
12047 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12048 {
12049 case 0:
12050 break;
12051 case 1:
12052 {
12053 int8_t i8Disp;
12054 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12055 u64EffAddr += i8Disp;
12056 break;
12057 }
12058 case 2:
12059 {
12060 uint32_t u32Disp;
12061 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12062 u64EffAddr += (int32_t)u32Disp;
12063 break;
12064 }
12065 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12066 }
12067
12068 }
12069
12070 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12071 *pGCPtrEff = u64EffAddr;
12072 else
12073 {
12074 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12075 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12076 }
12077 }
12078
12079    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12080 return VINF_SUCCESS;
12081}
12082
12083
12084#ifdef IEM_WITH_SETJMP
12085/**
12086 * Calculates the effective address of a ModR/M memory operand.
12087 *
12088 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12089 *
12090 * May longjmp on internal error.
12091 *
12092 * @return The effective address.
12093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12094 * @param bRm The ModRM byte.
12095 * @param cbImm The size of any immediate following the
12096 * effective address opcode bytes. Important for
12097 * RIP relative addressing.
12098 */
12099IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12100{
12101 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12102 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12103# define SET_SS_DEF() \
12104 do \
12105 { \
12106 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12107 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12108 } while (0)
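 /* Note: SET_SS_DEF() implements the x86 default-segment rule: memory operands
    using xBP or xSP as the base register default to SS unless an explicit
    segment prefix was decoded. */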
12109
12110 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12111 {
12112/** @todo Check the effective address size crap! */
12113 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12114 {
12115 uint16_t u16EffAddr;
12116
12117 /* Handle the disp16 form with no registers first. */
12118 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12119 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12120 else
12121 {
12122 /* Get the displacement. */
12123 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12124 {
12125 case 0: u16EffAddr = 0; break;
12126 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12127 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12128 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12129 }
12130
12131 /* Add the base and index registers to the disp. */
12132 switch (bRm & X86_MODRM_RM_MASK)
12133 {
12134 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12135 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12136 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12137 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12138 case 4: u16EffAddr += pCtx->si; break;
12139 case 5: u16EffAddr += pCtx->di; break;
12140 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12141 case 7: u16EffAddr += pCtx->bx; break;
12142 }
12143 }
12144
12145 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12146 return u16EffAddr;
12147 }
12148
12149 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12150 uint32_t u32EffAddr;
12151
12152 /* Handle the disp32 form with no registers first. */
12153 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12154 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12155 else
12156 {
12157 /* Get the register (or SIB) value. */
12158 switch ((bRm & X86_MODRM_RM_MASK))
12159 {
12160 case 0: u32EffAddr = pCtx->eax; break;
12161 case 1: u32EffAddr = pCtx->ecx; break;
12162 case 2: u32EffAddr = pCtx->edx; break;
12163 case 3: u32EffAddr = pCtx->ebx; break;
12164 case 4: /* SIB */
12165 {
12166 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12167
12168 /* Get the index and scale it. */
12169 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12170 {
12171 case 0: u32EffAddr = pCtx->eax; break;
12172 case 1: u32EffAddr = pCtx->ecx; break;
12173 case 2: u32EffAddr = pCtx->edx; break;
12174 case 3: u32EffAddr = pCtx->ebx; break;
12175 case 4: u32EffAddr = 0; /* none */ break;
12176 case 5: u32EffAddr = pCtx->ebp; break;
12177 case 6: u32EffAddr = pCtx->esi; break;
12178 case 7: u32EffAddr = pCtx->edi; break;
12179 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12180 }
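 /* The two SIB scale bits select a shift of 0..3, i.e. an index factor of
    1, 2, 4 or 8; index encoding 4 means no index register (handled above). */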
12181 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12182
12183 /* add base */
12184 switch (bSib & X86_SIB_BASE_MASK)
12185 {
12186 case 0: u32EffAddr += pCtx->eax; break;
12187 case 1: u32EffAddr += pCtx->ecx; break;
12188 case 2: u32EffAddr += pCtx->edx; break;
12189 case 3: u32EffAddr += pCtx->ebx; break;
12190 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12191 case 5:
12192 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12193 {
12194 u32EffAddr += pCtx->ebp;
12195 SET_SS_DEF();
12196 }
12197 else
12198 {
12199 uint32_t u32Disp;
12200 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12201 u32EffAddr += u32Disp;
12202 }
12203 break;
12204 case 6: u32EffAddr += pCtx->esi; break;
12205 case 7: u32EffAddr += pCtx->edi; break;
12206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12207 }
12208 break;
12209 }
12210 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12211 case 6: u32EffAddr = pCtx->esi; break;
12212 case 7: u32EffAddr = pCtx->edi; break;
12213 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12214 }
12215
12216 /* Get and add the displacement. */
12217 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12218 {
12219 case 0:
12220 break;
12221 case 1:
12222 {
12223 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12224 u32EffAddr += i8Disp;
12225 break;
12226 }
12227 case 2:
12228 {
12229 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12230 u32EffAddr += u32Disp;
12231 break;
12232 }
12233 default:
12234 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12235 }
12236 }
12237
12238 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12239 {
12240 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12241 return u32EffAddr;
12242 }
12243 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12244 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12245 return u32EffAddr & UINT16_MAX;
12246 }
12247
12248 uint64_t u64EffAddr;
12249
12250 /* Handle the rip+disp32 form with no registers first. */
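 /* Note: the disp32 is relative to the next instruction, hence the addition of
    the bytes decoded so far plus cbImm for any trailing immediate. */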
12251 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12252 {
12253 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12254 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12255 }
12256 else
12257 {
12258 /* Get the register (or SIB) value. */
12259 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12260 {
12261 case 0: u64EffAddr = pCtx->rax; break;
12262 case 1: u64EffAddr = pCtx->rcx; break;
12263 case 2: u64EffAddr = pCtx->rdx; break;
12264 case 3: u64EffAddr = pCtx->rbx; break;
12265 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12266 case 6: u64EffAddr = pCtx->rsi; break;
12267 case 7: u64EffAddr = pCtx->rdi; break;
12268 case 8: u64EffAddr = pCtx->r8; break;
12269 case 9: u64EffAddr = pCtx->r9; break;
12270 case 10: u64EffAddr = pCtx->r10; break;
12271 case 11: u64EffAddr = pCtx->r11; break;
12272 case 13: u64EffAddr = pCtx->r13; break;
12273 case 14: u64EffAddr = pCtx->r14; break;
12274 case 15: u64EffAddr = pCtx->r15; break;
12275 /* SIB */
12276 case 4:
12277 case 12:
12278 {
12279 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12280
12281 /* Get the index and scale it. */
12282 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12283 {
12284 case 0: u64EffAddr = pCtx->rax; break;
12285 case 1: u64EffAddr = pCtx->rcx; break;
12286 case 2: u64EffAddr = pCtx->rdx; break;
12287 case 3: u64EffAddr = pCtx->rbx; break;
12288 case 4: u64EffAddr = 0; /* none */ break;
12289 case 5: u64EffAddr = pCtx->rbp; break;
12290 case 6: u64EffAddr = pCtx->rsi; break;
12291 case 7: u64EffAddr = pCtx->rdi; break;
12292 case 8: u64EffAddr = pCtx->r8; break;
12293 case 9: u64EffAddr = pCtx->r9; break;
12294 case 10: u64EffAddr = pCtx->r10; break;
12295 case 11: u64EffAddr = pCtx->r11; break;
12296 case 12: u64EffAddr = pCtx->r12; break;
12297 case 13: u64EffAddr = pCtx->r13; break;
12298 case 14: u64EffAddr = pCtx->r14; break;
12299 case 15: u64EffAddr = pCtx->r15; break;
12300 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12301 }
12302 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12303
12304 /* add base */
12305 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12306 {
12307 case 0: u64EffAddr += pCtx->rax; break;
12308 case 1: u64EffAddr += pCtx->rcx; break;
12309 case 2: u64EffAddr += pCtx->rdx; break;
12310 case 3: u64EffAddr += pCtx->rbx; break;
12311 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12312 case 6: u64EffAddr += pCtx->rsi; break;
12313 case 7: u64EffAddr += pCtx->rdi; break;
12314 case 8: u64EffAddr += pCtx->r8; break;
12315 case 9: u64EffAddr += pCtx->r9; break;
12316 case 10: u64EffAddr += pCtx->r10; break;
12317 case 11: u64EffAddr += pCtx->r11; break;
12318 case 12: u64EffAddr += pCtx->r12; break;
12319 case 14: u64EffAddr += pCtx->r14; break;
12320 case 15: u64EffAddr += pCtx->r15; break;
12321 /* complicated encodings */
12322 case 5:
12323 case 13:
12324 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12325 {
12326 if (!pVCpu->iem.s.uRexB)
12327 {
12328 u64EffAddr += pCtx->rbp;
12329 SET_SS_DEF();
12330 }
12331 else
12332 u64EffAddr += pCtx->r13;
12333 }
12334 else
12335 {
12336 uint32_t u32Disp;
12337 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12338 u64EffAddr += (int32_t)u32Disp;
12339 }
12340 break;
12341 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12342 }
12343 break;
12344 }
12345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12346 }
12347
12348 /* Get and add the displacement. */
12349 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12350 {
12351 case 0:
12352 break;
12353 case 1:
12354 {
12355 int8_t i8Disp;
12356 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12357 u64EffAddr += i8Disp;
12358 break;
12359 }
12360 case 2:
12361 {
12362 uint32_t u32Disp;
12363 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12364 u64EffAddr += (int32_t)u32Disp;
12365 break;
12366 }
12367 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12368 }
12369
12370 }
12371
12372 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12373 {
12374 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12375 return u64EffAddr;
12376 }
12377 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12378 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12379 return u64EffAddr & UINT32_MAX;
12380}
12381#endif /* IEM_WITH_SETJMP */
12382
12383
12384/** @} */
12385
12386
12387
12388/*
12389 * Include the instructions
12390 */
12391#include "IEMAllInstructions.cpp.h"
12392
12393
12394
12395
12396#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12397
12398/**
12399 * Sets up execution verification mode.
12400 */
12401IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12402{
12403
12404 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12405
12406 /*
12407 * Always note down the address of the current instruction.
12408 */
12409 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12410 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12411
12412 /*
12413 * Enable verification and/or logging.
12414 */
12415 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12416 if ( fNewNoRem
12417 && ( 0
12418#if 0 /* auto enable on first paged protected mode interrupt */
12419 || ( pOrgCtx->eflags.Bits.u1IF
12420 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12421 && TRPMHasTrap(pVCpu)
12422 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12423#endif
12424#if 0
12425 || ( pOrgCtx->cs.Sel == 0x10
12426 && ( pOrgCtx->rip == 0x90119e3e
12427 || pOrgCtx->rip == 0x901d9810))
12428#endif
12429#if 0 /* Auto enable DSL - FPU stuff. */
12430 || ( pOrgCtx->cs.Sel == 0x10
12431 && (// pOrgCtx->rip == 0xc02ec07f
12432 //|| pOrgCtx->rip == 0xc02ec082
12433 //|| pOrgCtx->rip == 0xc02ec0c9
12434 0
12435 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12436#endif
12437#if 0 /* Auto enable DSL - fstp st0 stuff. */
12438 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12439#endif
12440#if 0
12441 || pOrgCtx->rip == 0x9022bb3a
12442#endif
12443#if 0
12444 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12445#endif
12446#if 0
12447 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12448 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12449#endif
12450#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12451 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12452 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12453 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12454#endif
12455#if 0 /* NT4SP1 - xadd early boot. */
12456 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12457#endif
12458#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12459 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12460#endif
12461#if 0 /* NT4SP1 - cmpxchg (AMD). */
12462 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12463#endif
12464#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12465 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12466#endif
12467#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12468 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12469
12470#endif
12471#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12472 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12473
12474#endif
12475#if 0 /* NT4SP1 - frstor [ecx] */
12476 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12477#endif
12478#if 0 /* xxxxxx - All long mode code. */
12479 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12480#endif
12481#if 0 /* rep movsq linux 3.7 64-bit boot. */
12482 || (pOrgCtx->rip == 0x0000000000100241)
12483#endif
12484#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12485 || (pOrgCtx->rip == 0x000000000215e240)
12486#endif
12487#if 0 /* DOS's size-overridden iret to v8086. */
12488 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12489#endif
12490 )
12491 )
12492 {
12493 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12494 RTLogFlags(NULL, "enabled");
12495 fNewNoRem = false;
12496 }
12497 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12498 {
12499 pVCpu->iem.s.fNoRem = fNewNoRem;
12500 if (!fNewNoRem)
12501 {
12502 LogAlways(("Enabling verification mode!\n"));
12503 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12504 }
12505 else
12506 LogAlways(("Disabling verification mode!\n"));
12507 }
12508
12509 /*
12510 * Switch state.
12511 */
12512 if (IEM_VERIFICATION_ENABLED(pVCpu))
12513 {
12514 static CPUMCTX s_DebugCtx; /* Ugly! */
12515
12516 s_DebugCtx = *pOrgCtx;
12517 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12518 }
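 /* From here on IEM executes against the s_DebugCtx copy while the original
    context is left for HM/REM to run the same instruction, so the two results
    can be compared in iemExecVerificationModeCheck(). */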
12519
12520 /*
12521 * See if there is an interrupt pending in TRPM and inject it if we can.
12522 */
12523 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12524 if ( pOrgCtx->eflags.Bits.u1IF
12525 && TRPMHasTrap(pVCpu)
12526 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12527 {
12528 uint8_t u8TrapNo;
12529 TRPMEVENT enmType;
12530 RTGCUINT uErrCode;
12531 RTGCPTR uCr2;
12532 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12533 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12534 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12535 TRPMResetTrap(pVCpu);
12536 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12537 }
12538
12539 /*
12540 * Reset the counters.
12541 */
12542 pVCpu->iem.s.cIOReads = 0;
12543 pVCpu->iem.s.cIOWrites = 0;
12544 pVCpu->iem.s.fIgnoreRaxRdx = false;
12545 pVCpu->iem.s.fOverlappingMovs = false;
12546 pVCpu->iem.s.fProblematicMemory = false;
12547 pVCpu->iem.s.fUndefinedEFlags = 0;
12548
12549 if (IEM_VERIFICATION_ENABLED(pVCpu))
12550 {
12551 /*
12552 * Free all verification records.
12553 */
12554 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12555 pVCpu->iem.s.pIemEvtRecHead = NULL;
12556 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12557 do
12558 {
12559 while (pEvtRec)
12560 {
12561 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12562 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12563 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12564 pEvtRec = pNext;
12565 }
12566 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12567 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12568 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12569 } while (pEvtRec);
12570 }
12571}
12572
12573
12574/**
12575 * Allocates an event record.
12576 * @returns Pointer to a record, or NULL if verification is disabled or the allocation failed.
12577 */
12578IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12579{
12580 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12581 return NULL;
12582
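 /* Try to recycle a record from the LIFO free list; fall back to the MM heap
    for the very first allocations. */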
12583 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12584 if (pEvtRec)
12585 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12586 else
12587 {
12588 if (!pVCpu->iem.s.ppIemEvtRecNext)
12589 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12590
12591 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12592 if (!pEvtRec)
12593 return NULL;
12594 }
12595 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12596 pEvtRec->pNext = NULL;
12597 return pEvtRec;
12598}
12599
12600
12601/**
12602 * IOMMMIORead notification.
12603 */
12604VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12605{
12606 PVMCPU pVCpu = VMMGetCpu(pVM);
12607 if (!pVCpu)
12608 return;
12609 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12610 if (!pEvtRec)
12611 return;
12612 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12613 pEvtRec->u.RamRead.GCPhys = GCPhys;
12614 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12615 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12616 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12617}
12618
12619
12620/**
12621 * IOMMMIOWrite notification.
12622 */
12623VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12624{
12625 PVMCPU pVCpu = VMMGetCpu(pVM);
12626 if (!pVCpu)
12627 return;
12628 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12629 if (!pEvtRec)
12630 return;
12631 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12632 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12633 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
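 /* Capture the written value byte by byte (RT_BYTE1 is the least significant
    byte); only the first cbValue bytes are meaningful. */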
12634 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12635 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12636 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12637 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12638 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12639 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12640}
12641
12642
12643/**
12644 * IOMIOPortRead notification.
12645 */
12646VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12647{
12648 PVMCPU pVCpu = VMMGetCpu(pVM);
12649 if (!pVCpu)
12650 return;
12651 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12652 if (!pEvtRec)
12653 return;
12654 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12655 pEvtRec->u.IOPortRead.Port = Port;
12656 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12657 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12658 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12659}
12660
12661/**
12662 * IOMIOPortWrite notification.
12663 */
12664VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12665{
12666 PVMCPU pVCpu = VMMGetCpu(pVM);
12667 if (!pVCpu)
12668 return;
12669 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12670 if (!pEvtRec)
12671 return;
12672 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12673 pEvtRec->u.IOPortWrite.Port = Port;
12674 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12675 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12676 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12677 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12678}
12679
12680
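/**
 * IOMIOPortReadString notification (string I/O port read).
 */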
12681VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12682{
12683 PVMCPU pVCpu = VMMGetCpu(pVM);
12684 if (!pVCpu)
12685 return;
12686 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12687 if (!pEvtRec)
12688 return;
12689 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12690 pEvtRec->u.IOPortStrRead.Port = Port;
12691 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12692 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12693 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12694 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12695}
12696
12697
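/**
 * IOMIOPortWriteString notification (string I/O port write).
 */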
12698VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12699{
12700 PVMCPU pVCpu = VMMGetCpu(pVM);
12701 if (!pVCpu)
12702 return;
12703 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12704 if (!pEvtRec)
12705 return;
12706 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12707 pEvtRec->u.IOPortStrWrite.Port = Port;
12708 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12709 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12710 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12711 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12712}
12713
12714
12715/**
12716 * Fakes and records an I/O port read.
12717 *
12718 * @returns VINF_SUCCESS.
12719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12720 * @param Port The I/O port.
12721 * @param pu32Value Where to store the fake value.
12722 * @param cbValue The size of the access.
12723 */
12724IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12725{
12726 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12727 if (pEvtRec)
12728 {
12729 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12730 pEvtRec->u.IOPortRead.Port = Port;
12731 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12732 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12733 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12734 }
12735 pVCpu->iem.s.cIOReads++;
12736 *pu32Value = 0xcccccccc;
12737 return VINF_SUCCESS;
12738}
12739
12740
12741/**
12742 * Fakes and records an I/O port write.
12743 *
12744 * @returns VINF_SUCCESS.
12745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12746 * @param Port The I/O port.
12747 * @param u32Value The value being written.
12748 * @param cbValue The size of the access.
12749 */
12750IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12751{
12752 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12753 if (pEvtRec)
12754 {
12755 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12756 pEvtRec->u.IOPortWrite.Port = Port;
12757 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12758 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12759 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12760 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12761 }
12762 pVCpu->iem.s.cIOWrites++;
12763 return VINF_SUCCESS;
12764}
12765
12766
12767/**
12768 * Used to add register state and disassembly details to an assertion message.
12769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12770 */
12771IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12772{
12773 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12774 PVM pVM = pVCpu->CTX_SUFF(pVM);
12775
12776 char szRegs[4096];
12777 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12778 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12779 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12780 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12781 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12782 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12783 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12784 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12785 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12786 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12787 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12788 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12789 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12790 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12791 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12792 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12793 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12794 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12795 " efer=%016VR{efer}\n"
12796 " pat=%016VR{pat}\n"
12797 " sf_mask=%016VR{sf_mask}\n"
12798 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12799 " lstar=%016VR{lstar}\n"
12800 " star=%016VR{star} cstar=%016VR{cstar}\n"
12801 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12802 );
12803
12804 char szInstr1[256];
12805 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12806 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12807 szInstr1, sizeof(szInstr1), NULL);
12808 char szInstr2[256];
12809 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12810 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12811 szInstr2, sizeof(szInstr2), NULL);
12812
12813 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12814}
12815
12816
12817/**
12818 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12819 * dump to the assertion info.
12820 *
12821 * @param pEvtRec The record to dump.
12822 */
12823IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12824{
12825 switch (pEvtRec->enmEvent)
12826 {
12827 case IEMVERIFYEVENT_IOPORT_READ:
12828 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12829 pEvtRec->u.IOPortRead.Port,
12830 pEvtRec->u.IOPortRead.cbValue);
12831 break;
12832 case IEMVERIFYEVENT_IOPORT_WRITE:
12833 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12834 pEvtRec->u.IOPortWrite.Port,
12835 pEvtRec->u.IOPortWrite.cbValue,
12836 pEvtRec->u.IOPortWrite.u32Value);
12837 break;
12838 case IEMVERIFYEVENT_IOPORT_STR_READ:
12839 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12840 pEvtRec->u.IOPortStrRead.Port,
12841 pEvtRec->u.IOPortStrRead.cbValue,
12842 pEvtRec->u.IOPortStrRead.cTransfers);
12843 break;
12844 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12845 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12846 pEvtRec->u.IOPortStrWrite.Port,
12847 pEvtRec->u.IOPortStrWrite.cbValue,
12848 pEvtRec->u.IOPortStrWrite.cTransfers);
12849 break;
12850 case IEMVERIFYEVENT_RAM_READ:
12851 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12852 pEvtRec->u.RamRead.GCPhys,
12853 pEvtRec->u.RamRead.cb);
12854 break;
12855 case IEMVERIFYEVENT_RAM_WRITE:
12856 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12857 pEvtRec->u.RamWrite.GCPhys,
12858 pEvtRec->u.RamWrite.cb,
12859 (int)pEvtRec->u.RamWrite.cb,
12860 pEvtRec->u.RamWrite.ab);
12861 break;
12862 default:
12863 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12864 break;
12865 }
12866}
12867
12868
12869/**
12870 * Raises an assertion on the specified records, showing the given message with
12871 * dumps of both records attached.
12872 *
12873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12874 * @param pEvtRec1 The first record.
12875 * @param pEvtRec2 The second record.
12876 * @param pszMsg The message explaining why we're asserting.
12877 */
12878IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
12879{
12880 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12881 iemVerifyAssertAddRecordDump(pEvtRec1);
12882 iemVerifyAssertAddRecordDump(pEvtRec2);
12883 iemVerifyAssertMsg2(pVCpu);
12884 RTAssertPanic();
12885}
12886
12887
12888/**
12889 * Raises an assertion on the specified record, showing the given message with
12890 * a record dump attached.
12891 *
12892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12893 * @param pEvtRec The record to dump.
12894 * @param pszMsg The message explaining why we're asserting.
12895 */
12896IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
12897{
12898 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12899 iemVerifyAssertAddRecordDump(pEvtRec);
12900 iemVerifyAssertMsg2(pVCpu);
12901 RTAssertPanic();
12902}
12903
12904
12905/**
12906 * Verifies a write record.
12907 *
12908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12909 * @param pEvtRec The write record.
12910 * @param fRem Set if REM did the other execution; if clear,
12911 * it was HM.
12912 */
12913IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
12914{
12915 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
12916 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
12917 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
12918 if ( RT_FAILURE(rc)
12919 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
12920 {
12921 /* fend off ins */
12922 if ( !pVCpu->iem.s.cIOReads
12923 || pEvtRec->u.RamWrite.ab[0] != 0xcc
12924 || ( pEvtRec->u.RamWrite.cb != 1
12925 && pEvtRec->u.RamWrite.cb != 2
12926 && pEvtRec->u.RamWrite.cb != 4) )
12927 {
12928 /* fend off ROMs and MMIO */
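 /* (The unsigned compares exclude roughly the legacy A0000h-FFFFFh VGA/BIOS
    window and the top 256 KB below 4 GB where the firmware ROM is mapped.) */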
12929 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
12930 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
12931 {
12932 /* fend off fxsave */
12933 if (pEvtRec->u.RamWrite.cb != 512)
12934 {
12935 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
12936 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12937 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
12938 RTAssertMsg2Add("%s: %.*Rhxs\n"
12939 "iem: %.*Rhxs\n",
12940 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
12941 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
12942 iemVerifyAssertAddRecordDump(pEvtRec);
12943 iemVerifyAssertMsg2(pVCpu);
12944 RTAssertPanic();
12945 }
12946 }
12947 }
12948 }
12949
12950}
12951
12952/**
12953 * Performs the post-execution verification checks.
12954 */
12955IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
12956{
12957 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12958 return rcStrictIem;
12959
12960 /*
12961 * Switch back the state.
12962 */
12963 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
12964 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
12965 Assert(pOrgCtx != pDebugCtx);
12966 IEM_GET_CTX(pVCpu) = pOrgCtx;
12967
12968 /*
12969 * Execute the instruction in REM.
12970 */
12971 bool fRem = false;
12972 PVM pVM = pVCpu->CTX_SUFF(pVM);
12973
12974 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
12975#ifdef IEM_VERIFICATION_MODE_FULL_HM
12976 if ( HMIsEnabled(pVM)
12977 && pVCpu->iem.s.cIOReads == 0
12978 && pVCpu->iem.s.cIOWrites == 0
12979 && !pVCpu->iem.s.fProblematicMemory)
12980 {
12981 uint64_t uStartRip = pOrgCtx->rip;
12982 unsigned iLoops = 0;
12983 do
12984 {
12985 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
12986 iLoops++;
12987 } while ( rc == VINF_SUCCESS
12988 || ( rc == VINF_EM_DBG_STEPPED
12989 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
12990 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
12991 || ( pOrgCtx->rip != pDebugCtx->rip
12992 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
12993 && iLoops < 8) );
12994 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
12995 rc = VINF_SUCCESS;
12996 }
12997#endif
12998 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
12999 || rc == VINF_IOM_R3_IOPORT_READ
13000 || rc == VINF_IOM_R3_IOPORT_WRITE
13001 || rc == VINF_IOM_R3_MMIO_READ
13002 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13003 || rc == VINF_IOM_R3_MMIO_WRITE
13004 || rc == VINF_CPUM_R3_MSR_READ
13005 || rc == VINF_CPUM_R3_MSR_WRITE
13006 || rc == VINF_EM_RESCHEDULE
13007 )
13008 {
13009 EMRemLock(pVM);
13010 rc = REMR3EmulateInstruction(pVM, pVCpu);
13011 AssertRC(rc);
13012 EMRemUnlock(pVM);
13013 fRem = true;
13014 }
13015
13016# if 1 /* Skip unimplemented instructions for now. */
13017 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13018 {
13019 IEM_GET_CTX(pVCpu) = pOrgCtx;
13020 if (rc == VINF_EM_DBG_STEPPED)
13021 return VINF_SUCCESS;
13022 return rc;
13023 }
13024# endif
13025
13026 /*
13027 * Compare the register states.
13028 */
13029 unsigned cDiffs = 0;
13030 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13031 {
13032 //Log(("REM and IEM ends up with different registers!\n"));
13033 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13034
13035# define CHECK_FIELD(a_Field) \
13036 do \
13037 { \
13038 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13039 { \
13040 switch (sizeof(pOrgCtx->a_Field)) \
13041 { \
13042 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13043 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13044 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13045 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13046 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13047 } \
13048 cDiffs++; \
13049 } \
13050 } while (0)
13051# define CHECK_XSTATE_FIELD(a_Field) \
13052 do \
13053 { \
13054 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13055 { \
13056 switch (sizeof(pOrgXState->a_Field)) \
13057 { \
13058 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13059 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13060 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13061 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13062 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13063 } \
13064 cDiffs++; \
13065 } \
13066 } while (0)
13067
13068# define CHECK_BIT_FIELD(a_Field) \
13069 do \
13070 { \
13071 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13072 { \
13073 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13074 cDiffs++; \
13075 } \
13076 } while (0)
13077
13078# define CHECK_SEL(a_Sel) \
13079 do \
13080 { \
13081 CHECK_FIELD(a_Sel.Sel); \
13082 CHECK_FIELD(a_Sel.Attr.u); \
13083 CHECK_FIELD(a_Sel.u64Base); \
13084 CHECK_FIELD(a_Sel.u32Limit); \
13085 CHECK_FIELD(a_Sel.fFlags); \
13086 } while (0)
13087
13088 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13089 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13090
13091#if 1 /* The recompiler doesn't update these the intel way. */
13092 if (fRem)
13093 {
13094 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13095 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13096 pOrgXState->x87.CS = pDebugXState->x87.CS;
13097 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13098 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13099 pOrgXState->x87.DS = pDebugXState->x87.DS;
13100 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13101 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13102 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13103 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13104 }
13105#endif
13106 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13107 {
13108 RTAssertMsg2Weak(" the FPU state differs\n");
13109 cDiffs++;
13110 CHECK_XSTATE_FIELD(x87.FCW);
13111 CHECK_XSTATE_FIELD(x87.FSW);
13112 CHECK_XSTATE_FIELD(x87.FTW);
13113 CHECK_XSTATE_FIELD(x87.FOP);
13114 CHECK_XSTATE_FIELD(x87.FPUIP);
13115 CHECK_XSTATE_FIELD(x87.CS);
13116 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13117 CHECK_XSTATE_FIELD(x87.FPUDP);
13118 CHECK_XSTATE_FIELD(x87.DS);
13119 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13120 CHECK_XSTATE_FIELD(x87.MXCSR);
13121 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13122 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13123 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13124 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13125 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13126 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13127 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13128 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13129 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13130 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13131 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13132 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13133 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13134 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13135 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13136 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13137 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13138 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13139 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13140 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13141 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13142 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13143 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13144 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13145 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13146 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13147 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13148 }
13149 CHECK_FIELD(rip);
13150 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13151 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13152 {
13153 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13154 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13155 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13156 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13157 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13158 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13159 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13160 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13161 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13162 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13163 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13164 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13165 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13166 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13167 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13168 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13169 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13170 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13171 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13172 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13173 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13174 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13175 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13176 }
13177
13178 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13179 CHECK_FIELD(rax);
13180 CHECK_FIELD(rcx);
13181 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13182 CHECK_FIELD(rdx);
13183 CHECK_FIELD(rbx);
13184 CHECK_FIELD(rsp);
13185 CHECK_FIELD(rbp);
13186 CHECK_FIELD(rsi);
13187 CHECK_FIELD(rdi);
13188 CHECK_FIELD(r8);
13189 CHECK_FIELD(r9);
13190 CHECK_FIELD(r10);
13191 CHECK_FIELD(r11);
13192 CHECK_FIELD(r12);
13193 CHECK_FIELD(r13);
13194 CHECK_SEL(cs);
13195 CHECK_SEL(ss);
13196 CHECK_SEL(ds);
13197 CHECK_SEL(es);
13198 CHECK_SEL(fs);
13199 CHECK_SEL(gs);
13200 CHECK_FIELD(cr0);
13201
13202 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13203 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13204 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
13205 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13206 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13207 {
13208 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13209 { /* ignore */ }
13210 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13211 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13212 && fRem)
13213 { /* ignore */ }
13214 else
13215 CHECK_FIELD(cr2);
13216 }
13217 CHECK_FIELD(cr3);
13218 CHECK_FIELD(cr4);
13219 CHECK_FIELD(dr[0]);
13220 CHECK_FIELD(dr[1]);
13221 CHECK_FIELD(dr[2]);
13222 CHECK_FIELD(dr[3]);
13223 CHECK_FIELD(dr[6]);
13224 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13225 CHECK_FIELD(dr[7]);
13226 CHECK_FIELD(gdtr.cbGdt);
13227 CHECK_FIELD(gdtr.pGdt);
13228 CHECK_FIELD(idtr.cbIdt);
13229 CHECK_FIELD(idtr.pIdt);
13230 CHECK_SEL(ldtr);
13231 CHECK_SEL(tr);
13232 CHECK_FIELD(SysEnter.cs);
13233 CHECK_FIELD(SysEnter.eip);
13234 CHECK_FIELD(SysEnter.esp);
13235 CHECK_FIELD(msrEFER);
13236 CHECK_FIELD(msrSTAR);
13237 CHECK_FIELD(msrPAT);
13238 CHECK_FIELD(msrLSTAR);
13239 CHECK_FIELD(msrCSTAR);
13240 CHECK_FIELD(msrSFMASK);
13241 CHECK_FIELD(msrKERNELGSBASE);
13242
13243 if (cDiffs != 0)
13244 {
13245 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13246 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13247 RTAssertPanic();
13248 static bool volatile s_fEnterDebugger = true;
13249 if (s_fEnterDebugger)
13250 DBGFSTOP(pVM);
13251
13252# if 1 /* Ignore unimplemented instructions for now. */
13253 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13254 rcStrictIem = VINF_SUCCESS;
13255# endif
13256 }
13257# undef CHECK_FIELD
13258# undef CHECK_BIT_FIELD
13259 }
13260
13261 /*
13262 * If the register state compared fine, check the verification event
13263 * records.
13264 */
13265 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13266 {
13267 /*
13268 * Compare verification event records.
13269 * - I/O port accesses should be a 1:1 match.
13270 */
13271 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13272 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13273 while (pIemRec && pOtherRec)
13274 {
13275 /* Since we might miss RAM writes and reads, ignore reads and verify
13276 any extra IEM write records against actual guest memory. */
13277 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13278 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13279 && pIemRec->pNext)
13280 {
13281 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13282 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13283 pIemRec = pIemRec->pNext;
13284 }
13285
13286 /* Do the compare. */
13287 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13288 {
13289 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13290 break;
13291 }
13292 bool fEquals;
13293 switch (pIemRec->enmEvent)
13294 {
13295 case IEMVERIFYEVENT_IOPORT_READ:
13296 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13297 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13298 break;
13299 case IEMVERIFYEVENT_IOPORT_WRITE:
13300 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13301 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13302 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13303 break;
13304 case IEMVERIFYEVENT_IOPORT_STR_READ:
13305 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13306 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13307 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13308 break;
13309 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13310 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13311 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13312 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13313 break;
13314 case IEMVERIFYEVENT_RAM_READ:
13315 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13316 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13317 break;
13318 case IEMVERIFYEVENT_RAM_WRITE:
13319 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13320 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13321 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13322 break;
13323 default:
13324 fEquals = false;
13325 break;
13326 }
13327 if (!fEquals)
13328 {
13329 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13330 break;
13331 }
13332
13333 /* advance */
13334 pIemRec = pIemRec->pNext;
13335 pOtherRec = pOtherRec->pNext;
13336 }
13337
13338 /* Ignore extra writes and reads. */
13339 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13340 {
13341 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13342 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13343 pIemRec = pIemRec->pNext;
13344 }
13345 if (pIemRec != NULL)
13346 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13347 else if (pOtherRec != NULL)
13348 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13349 }
13350 IEM_GET_CTX(pVCpu) = pOrgCtx;
13351
13352 return rcStrictIem;
13353}
13354
13355#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13356
13357/* stubs */
13358IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13359{
13360 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13361 return VERR_INTERNAL_ERROR;
13362}
13363
13364IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13365{
13366 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13367 return VERR_INTERNAL_ERROR;
13368}
13369
13370#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13371
13372
13373#ifdef LOG_ENABLED
13374/**
13375 * Logs the current instruction.
13376 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13377 * @param pCtx The current CPU context.
13378 * @param fSameCtx Set if we have the same context information as the VMM,
13379 * clear if we may have already executed an instruction in
13380 * our debug context. When clear, we assume IEMCPU holds
13381 * valid CPU mode info.
13382 */
13383IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13384{
13385# ifdef IN_RING3
13386 if (LogIs2Enabled())
13387 {
13388 char szInstr[256];
13389 uint32_t cbInstr = 0;
13390 if (fSameCtx)
13391 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13392 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13393 szInstr, sizeof(szInstr), &cbInstr);
13394 else
13395 {
13396 uint32_t fFlags = 0;
13397 switch (pVCpu->iem.s.enmCpuMode)
13398 {
13399 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13400 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13401 case IEMMODE_16BIT:
13402 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13403 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13404 else
13405 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13406 break;
13407 }
13408 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13409 szInstr, sizeof(szInstr), &cbInstr);
13410 }
13411
13412 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13413 Log2(("****\n"
13414 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13415 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13416 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13417 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13418 " %s\n"
13419 ,
13420 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13421 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13422 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13423 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13424 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13425 szInstr));
13426
13427 if (LogIs3Enabled())
13428 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13429 }
13430 else
13431# endif
13432 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13433 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13434}
13435#endif
13436
13437
13438/**
13439 * Makes status code adjustments (pass up from I/O and access handlers)
13440 * as well as maintaining statistics.
13441 *
13442 * @returns Strict VBox status code to pass up.
13443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13444 * @param rcStrict The status from executing an instruction.
13445 */
13446DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13447{
13448 if (rcStrict != VINF_SUCCESS)
13449 {
13450 if (RT_SUCCESS(rcStrict))
13451 {
13452 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13453 || rcStrict == VINF_IOM_R3_IOPORT_READ
13454 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13455 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13456 || rcStrict == VINF_IOM_R3_MMIO_READ
13457 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13458 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13459 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13460 || rcStrict == VINF_CPUM_R3_MSR_READ
13461 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13462 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13463 || rcStrict == VINF_EM_RAW_TO_R3
13464 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13465 /* raw-mode / virt handlers only: */
13466 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13467 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13468 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13469 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13470 || rcStrict == VINF_SELM_SYNC_GDT
13471 || rcStrict == VINF_CSAM_PENDING_ACTION
13472 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13473 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13474/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13475 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
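 /* A pending pass-up status replaces rcStrict when it is outside the EM
    informational range or of higher priority (numerically lower) than
    rcStrict. */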
13476 if (rcPassUp == VINF_SUCCESS)
13477 pVCpu->iem.s.cRetInfStatuses++;
13478 else if ( rcPassUp < VINF_EM_FIRST
13479 || rcPassUp > VINF_EM_LAST
13480 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13481 {
13482 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13483 pVCpu->iem.s.cRetPassUpStatus++;
13484 rcStrict = rcPassUp;
13485 }
13486 else
13487 {
13488 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13489 pVCpu->iem.s.cRetInfStatuses++;
13490 }
13491 }
13492 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13493 pVCpu->iem.s.cRetAspectNotImplemented++;
13494 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13495 pVCpu->iem.s.cRetInstrNotImplemented++;
13496#ifdef IEM_VERIFICATION_MODE_FULL
13497 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13498 rcStrict = VINF_SUCCESS;
13499#endif
13500 else
13501 pVCpu->iem.s.cRetErrStatuses++;
13502 }
13503 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13504 {
13505 pVCpu->iem.s.cRetPassUpStatus++;
13506 rcStrict = pVCpu->iem.s.rcPassUp;
13507 }
13508
13509 return rcStrict;
13510}
13511
13512
13513/**
13514 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13515 * IEMExecOneWithPrefetchedByPC.
13516 *
13517 * Similar code is found in IEMExecLots.
13518 *
13519 * @return Strict VBox status code.
13520 * @param pVCpu The cross context virtual CPU structure of the
13521 * calling thread.
13522 * @param fExecuteInhibit If set, execute the instruction following CLI,
13523 * POP SS and MOV SS,GR.
13524 */
13525#ifdef __GNUC__
13526DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13527#else
13528DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13529#endif
13530{
13531#ifdef IEM_WITH_SETJMP
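 /* In setjmp mode, deeply nested helpers report failures via longjmp; the
    status code they pass shows up here as the non-zero setjmp return. */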
13532 VBOXSTRICTRC rcStrict;
13533 jmp_buf JmpBuf;
13534 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13535 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13536 if ((rcStrict = setjmp(JmpBuf)) == 0)
13537 {
13538 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13539 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13540 }
13541 else
13542 pVCpu->iem.s.cLongJumps++;
13543 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13544#else
13545 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13546 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13547#endif
13548 if (rcStrict == VINF_SUCCESS)
13549 pVCpu->iem.s.cInstructions++;
13550 if (pVCpu->iem.s.cActiveMappings > 0)
13551 {
13552 Assert(rcStrict != VINF_SUCCESS);
13553 iemMemRollback(pVCpu);
13554 }
13555//#ifdef DEBUG
13556// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13557//#endif
13558
13559 /* Execute the next instruction as well if a cli, pop ss or
13560 mov ss, Gr has just completed successfully. */
13561 if ( fExecuteInhibit
13562 && rcStrict == VINF_SUCCESS
13563 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13564 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13565 {
13566 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13567 if (rcStrict == VINF_SUCCESS)
13568 {
13569#ifdef LOG_ENABLED
13570 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13571#endif
13572#ifdef IEM_WITH_SETJMP
13573 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13574 if ((rcStrict = setjmp(JmpBuf)) == 0)
13575 {
13576 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13577 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13578 }
13579 else
13580 pVCpu->iem.s.cLongJumps++;
13581 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13582#else
13583 IEM_OPCODE_GET_NEXT_U8(&b);
13584 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13585#endif
13586 if (rcStrict == VINF_SUCCESS)
13587 pVCpu->iem.s.cInstructions++;
13588 if (pVCpu->iem.s.cActiveMappings > 0)
13589 {
13590 Assert(rcStrict != VINF_SUCCESS);
13591 iemMemRollback(pVCpu);
13592 }
13593 }
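 /* Clear the interrupt inhibition by recording a PC the guest RIP will never
    match (the value looks like a magic marker rather than a real address). */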
13594 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13595 }
13596
13597 /*
13598 * Return value fiddling, statistics and sanity assertions.
13599 */
13600 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13601
13602 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13603 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13604#if defined(IEM_VERIFICATION_MODE_FULL)
13605 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13606 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13607 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13608 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13609#endif
13610 return rcStrict;
13611}
13612
13613
13614#ifdef IN_RC
13615/**
13616 * Re-enters raw-mode or ensure we return to ring-3.
13617 *
13618 * @returns rcStrict, maybe modified.
13619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13620 * @param pCtx The current CPU context.
13621 * @param rcStrict The status code returned by the interpreter.
13622 */
13623DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13624{
13625 if ( !pVCpu->iem.s.fInPatchCode
13626 && ( rcStrict == VINF_SUCCESS
13627 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13628 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13629 {
13630 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13631 CPUMRawEnter(pVCpu);
13632 else
13633 {
13634 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13635 rcStrict = VINF_EM_RESCHEDULE;
13636 }
13637 }
13638 return rcStrict;
13639}
13640#endif
13641
13642
13643/**
13644 * Execute one instruction.
13645 *
13646 * @return Strict VBox status code.
13647 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13648 */
13649VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13650{
13651#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13652 if (++pVCpu->iem.s.cVerifyDepth == 1)
13653 iemExecVerificationModeSetup(pVCpu);
13654#endif
13655#ifdef LOG_ENABLED
13656 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13657 iemLogCurInstr(pVCpu, pCtx, true);
13658#endif
13659
13660 /*
13661 * Do the decoding and emulation.
13662 */
13663 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13664 if (rcStrict == VINF_SUCCESS)
13665 rcStrict = iemExecOneInner(pVCpu, true);
13666
13667#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13668 /*
13669 * Assert some sanity.
13670 */
13671 if (pVCpu->iem.s.cVerifyDepth == 1)
13672 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13673 pVCpu->iem.s.cVerifyDepth--;
13674#endif
13675#ifdef IN_RC
13676 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13677#endif
13678 if (rcStrict != VINF_SUCCESS)
13679 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13680 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13681 return rcStrict;
13682}
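
/*
 * Illustrative sketch (not part of the original file): how a caller such as EM
 * might single-step the guest via IEMExecOne.  The loop shape and the helper
 * name are assumptions made for the example; only IEMExecOne and the strict
 * status handling come from this file.
 */
#if 0 /* example sketch only, not compiled */
static VBOXSTRICTRC emR3ExampleSingleStep(PVMCPU pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);       /* decode + emulate one guest instruction */
        if (rcStrict != VINF_SUCCESS)       /* informational statuses are passed up to the caller */
            break;
    }
    return rcStrict;
}
#endif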
13683
13684
13685VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13686{
13687 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13688 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13689
13690 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13691 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13692 if (rcStrict == VINF_SUCCESS)
13693 {
13694 rcStrict = iemExecOneInner(pVCpu, true);
13695 if (pcbWritten)
13696 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13697 }
13698
13699#ifdef IN_RC
13700 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13701#endif
13702 return rcStrict;
13703}
13704
13705
13706VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13707 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13708{
13709 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13710 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13711
13712 VBOXSTRICTRC rcStrict;
13713 if ( cbOpcodeBytes
13714 && pCtx->rip == OpcodeBytesPC)
13715 {
13716 iemInitDecoder(pVCpu, false);
13717#ifdef IEM_WITH_CODE_TLB
13718 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13719 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13720 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13721 pVCpu->iem.s.offCurInstrStart = 0;
13722 pVCpu->iem.s.offInstrNextByte = 0;
13723#else
13724 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13725 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13726#endif
13727 rcStrict = VINF_SUCCESS;
13728 }
13729 else
13730 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13731 if (rcStrict == VINF_SUCCESS)
13732 {
13733 rcStrict = iemExecOneInner(pVCpu, true);
13734 }
13735
13736#ifdef IN_RC
13737 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13738#endif
13739 return rcStrict;
13740}
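
/*
 * Illustrative sketch (not part of the original file): feeding already fetched
 * opcode bytes to the interpreter.  The abInstr buffer and its contents are
 * assumptions; the API only uses the supplied bytes when they were fetched at
 * the current RIP, otherwise it falls back to a normal prefetch.
 */
#if 0 /* example sketch only, not compiled */
    PCPUMCTX     pCtx      = IEM_GET_CTX(pVCpu);
    uint8_t      abInstr[] = { 0x90 };      /* assumed: a single NOP fetched at pCtx->rip */
    VBOXSTRICTRC rcStrict  = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                                          abInstr, sizeof(abInstr));
#endif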
13741
13742
13743VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13744{
13745 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13746 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13747
13748 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13749 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13750 if (rcStrict == VINF_SUCCESS)
13751 {
13752 rcStrict = iemExecOneInner(pVCpu, false);
13753 if (pcbWritten)
13754 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13755 }
13756
13757#ifdef IN_RC
13758 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13759#endif
13760 return rcStrict;
13761}
13762
13763
13764VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13765 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13766{
13767 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13768 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13769
13770 VBOXSTRICTRC rcStrict;
13771 if ( cbOpcodeBytes
13772 && pCtx->rip == OpcodeBytesPC)
13773 {
13774 iemInitDecoder(pVCpu, true);
13775#ifdef IEM_WITH_CODE_TLB
13776 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13777 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13778 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13779 pVCpu->iem.s.offCurInstrStart = 0;
13780 pVCpu->iem.s.offInstrNextByte = 0;
13781#else
13782 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13783 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13784#endif
13785 rcStrict = VINF_SUCCESS;
13786 }
13787 else
13788 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13789 if (rcStrict == VINF_SUCCESS)
13790 rcStrict = iemExecOneInner(pVCpu, false);
13791
13792#ifdef IN_RC
13793 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13794#endif
13795 return rcStrict;
13796}
13797
13798
13799/**
13800 * May come in handy for debugging DISGetParamSize.
13801 *
13802 * @returns Strict VBox status code.
13803 * @param pVCpu The cross context virtual CPU structure of the
13804 * calling EMT.
13805 * @param pCtxCore The context core structure.
13806 * @param OpcodeBytesPC The PC of the opcode bytes.
13807 * @param pvOpcodeBytes Prefetched opcode bytes.
13808 * @param cbOpcodeBytes Number of prefetched bytes.
13809 * @param pcbWritten Where to return the number of bytes written.
13810 * Optional.
13811 */
13812VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13813 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13814 uint32_t *pcbWritten)
13815{
13816 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13817 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13818
13819 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13820 VBOXSTRICTRC rcStrict;
13821 if ( cbOpcodeBytes
13822 && pCtx->rip == OpcodeBytesPC)
13823 {
13824 iemInitDecoder(pVCpu, true);
13825#ifdef IEM_WITH_CODE_TLB
13826 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13827 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13828 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13829 pVCpu->iem.s.offCurInstrStart = 0;
13830 pVCpu->iem.s.offInstrNextByte = 0;
13831#else
13832 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13833 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13834#endif
13835 rcStrict = VINF_SUCCESS;
13836 }
13837 else
13838 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13839 if (rcStrict == VINF_SUCCESS)
13840 {
13841 rcStrict = iemExecOneInner(pVCpu, false);
13842 if (pcbWritten)
13843 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13844 }
13845
13846#ifdef IN_RC
13847 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13848#endif
13849 return rcStrict;
13850}
13851
13852
13853VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13854{
13855 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13856
13857#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13858 /*
13859 * See if there is an interrupt pending in TRPM, inject it if we can.
13860 */
13861 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13862# ifdef IEM_VERIFICATION_MODE_FULL
13863 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13864# endif
13865 if ( pCtx->eflags.Bits.u1IF
13866 && TRPMHasTrap(pVCpu)
13867 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13868 {
13869 uint8_t u8TrapNo;
13870 TRPMEVENT enmType;
13871 RTGCUINT uErrCode;
13872 RTGCPTR uCr2;
13873 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13874 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13875 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13876 TRPMResetTrap(pVCpu);
13877 }
13878
13879 /*
13880 * Log the state.
13881 */
13882# ifdef LOG_ENABLED
13883 iemLogCurInstr(pVCpu, pCtx, true);
13884# endif
13885
13886 /*
13887 * Do the decoding and emulation.
13888 */
13889 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13890 if (rcStrict == VINF_SUCCESS)
13891 rcStrict = iemExecOneInner(pVCpu, true);
13892
13893 /*
13894 * Assert some sanity.
13895 */
13896 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13897
13898 /*
13899 * Log and return.
13900 */
13901 if (rcStrict != VINF_SUCCESS)
13902 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13903 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13904 if (pcInstructions)
13905 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
13906 return rcStrict;
13907
13908#else /* Not verification mode */
13909
13910 /*
13911 * See if there is an interrupt pending in TRPM, inject it if we can.
13912 */
13913 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13914# ifdef IEM_VERIFICATION_MODE_FULL
13915 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13916# endif
13917 if ( pCtx->eflags.Bits.u1IF
13918 && TRPMHasTrap(pVCpu)
13919 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13920 {
13921 uint8_t u8TrapNo;
13922 TRPMEVENT enmType;
13923 RTGCUINT uErrCode;
13924 RTGCPTR uCr2;
13925 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13926 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13927 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13928 TRPMResetTrap(pVCpu);
13929 }
13930
13931 /*
13932 * Initial decoder init w/ prefetch, then set up setjmp.
13933 */
13934 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13935 if (rcStrict == VINF_SUCCESS)
13936 {
13937# ifdef IEM_WITH_SETJMP
13938 jmp_buf JmpBuf;
13939 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13940 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13941 pVCpu->iem.s.cActiveMappings = 0;
13942 if ((rcStrict = setjmp(JmpBuf)) == 0)
13943# endif
13944 {
13945 /*
13946 * The run loop. We limit ourselves to 4096 instructions right now.
13947 */
13948 PVM pVM = pVCpu->CTX_SUFF(pVM);
13949 uint32_t cInstr = 4096;
13950 for (;;)
13951 {
13952 /*
13953 * Log the state.
13954 */
13955# ifdef LOG_ENABLED
13956 iemLogCurInstr(pVCpu, pCtx, true);
13957# endif
13958
13959 /*
13960 * Do the decoding and emulation.
13961 */
13962 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13963 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13964 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13965 {
13966 Assert(pVCpu->iem.s.cActiveMappings == 0);
13967 pVCpu->iem.s.cInstructions++;
13968 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
13969 {
13970 uint32_t fCpu = pVCpu->fLocalForcedActions
13971 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
13972 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
13973 | VMCPU_FF_TLB_FLUSH
13974# ifdef VBOX_WITH_RAW_MODE
13975 | VMCPU_FF_TRPM_SYNC_IDT
13976 | VMCPU_FF_SELM_SYNC_TSS
13977 | VMCPU_FF_SELM_SYNC_GDT
13978 | VMCPU_FF_SELM_SYNC_LDT
13979# endif
13980 | VMCPU_FF_INHIBIT_INTERRUPTS
13981 | VMCPU_FF_BLOCK_NMIS ));
13982
13983 if (RT_LIKELY( ( !fCpu
13984 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
13985 && !pCtx->rflags.Bits.u1IF) )
13986 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
13987 {
13988 if (cInstr-- > 0)
13989 {
13990 Assert(pVCpu->iem.s.cActiveMappings == 0);
13991 iemReInitDecoder(pVCpu);
13992 continue;
13993 }
13994 }
13995 }
13996 Assert(pVCpu->iem.s.cActiveMappings == 0);
13997 }
13998 else if (pVCpu->iem.s.cActiveMappings > 0)
13999 iemMemRollback(pVCpu);
14000 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14001 break;
14002 }
14003 }
14004# ifdef IEM_WITH_SETJMP
14005 else
14006 {
14007 if (pVCpu->iem.s.cActiveMappings > 0)
14008 iemMemRollback(pVCpu);
14009 pVCpu->iem.s.cLongJumps++;
14010 }
14011 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14012# endif
14013
14014 /*
14015 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14016 */
14017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14019# if defined(IEM_VERIFICATION_MODE_FULL)
14020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14024# endif
14025 }
14026
14027 /*
14028 * Maybe re-enter raw-mode and log.
14029 */
14030# ifdef IN_RC
14031 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14032# endif
14033 if (rcStrict != VINF_SUCCESS)
14034 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14035 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14036 if (pcInstructions)
14037 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14038 return rcStrict;
14039#endif /* Not verification mode */
14040}
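
/*
 * Illustrative sketch (not part of the original file): a caller running a batch
 * of instructions and logging how many were executed.  The surrounding control
 * flow is an assumption for the example.
 */
#if 0 /* example sketch only, not compiled */
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("Executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
#endif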
14041
14042
14043
14044/**
14045 * Injects a trap, fault, abort, software interrupt or external interrupt.
14046 *
14047 * The parameter list matches TRPMQueryTrapAll pretty closely.
14048 *
14049 * @returns Strict VBox status code.
14050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14051 * @param u8TrapNo The trap number.
14052 * @param enmType What type is it (trap/fault/abort), software
14053 * interrupt or hardware interrupt.
14054 * @param uErrCode The error code if applicable.
14055 * @param uCr2 The CR2 value if applicable.
14056 * @param cbInstr The instruction length (only relevant for
14057 * software interrupts).
14058 */
14059VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14060 uint8_t cbInstr)
14061{
14062 iemInitDecoder(pVCpu, false);
14063#ifdef DBGFTRACE_ENABLED
14064 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14065 u8TrapNo, enmType, uErrCode, uCr2);
14066#endif
14067
14068 uint32_t fFlags;
14069 switch (enmType)
14070 {
14071 case TRPM_HARDWARE_INT:
14072 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14073 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14074 uErrCode = uCr2 = 0;
14075 break;
14076
14077 case TRPM_SOFTWARE_INT:
14078 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14079 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14080 uErrCode = uCr2 = 0;
14081 break;
14082
14083 case TRPM_TRAP:
14084 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14085 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14086 if (u8TrapNo == X86_XCPT_PF)
14087 fFlags |= IEM_XCPT_FLAGS_CR2;
14088 switch (u8TrapNo)
14089 {
14090 case X86_XCPT_DF:
14091 case X86_XCPT_TS:
14092 case X86_XCPT_NP:
14093 case X86_XCPT_SS:
14094 case X86_XCPT_PF:
14095 case X86_XCPT_AC:
14096 fFlags |= IEM_XCPT_FLAGS_ERR;
14097 break;
14098
14099 case X86_XCPT_NMI:
14100 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14101 break;
14102 }
14103 break;
14104
14105 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14106 }
14107
14108 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14109}
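
/*
 * Illustrative sketch (not part of the original file): injecting an external
 * interrupt the same way the TRPM forwarding code in IEMExecLots and
 * IEMInjectTrpmEvent does it.  The vector value is an assumption; the error
 * code, CR2 and cbInstr arguments are ignored for TRPM_HARDWARE_INT.
 */
#if 0 /* example sketch only, not compiled */
    uint8_t const u8Vector = 0x20;          /* assumed: a timer interrupt vector */
    VBOXSTRICTRC  rcStrict = IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT,
                                           0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
#endif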
14110
14111
14112/**
14113 * Injects the active TRPM event.
14114 *
14115 * @returns Strict VBox status code.
14116 * @param pVCpu The cross context virtual CPU structure.
14117 */
14118VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14119{
14120#ifndef IEM_IMPLEMENTS_TASKSWITCH
14121 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14122#else
14123 uint8_t u8TrapNo;
14124 TRPMEVENT enmType;
14125 RTGCUINT uErrCode;
14126 RTGCUINTPTR uCr2;
14127 uint8_t cbInstr;
14128 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14129 if (RT_FAILURE(rc))
14130 return rc;
14131
14132 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14133
14134 /** @todo Are there any other codes that imply the event was successfully
14135 * delivered to the guest? See @bugref{6607}. */
14136 if ( rcStrict == VINF_SUCCESS
14137 || rcStrict == VINF_IEM_RAISED_XCPT)
14138 {
14139 TRPMResetTrap(pVCpu);
14140 }
14141 return rcStrict;
14142#endif
14143}
14144
14145
14146VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14147{
14148 return VERR_NOT_IMPLEMENTED;
14149}
14150
14151
14152VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14153{
14154 return VERR_NOT_IMPLEMENTED;
14155}
14156
14157
14158#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14159/**
14160 * Executes an IRET instruction with the default operand size.
14161 *
14162 * This is for PATM.
14163 *
14164 * @returns VBox status code.
14165 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14166 * @param pCtxCore The register frame.
14167 */
14168VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14169{
14170 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14171
14172 iemCtxCoreToCtx(pCtx, pCtxCore);
14173 iemInitDecoder(pVCpu);
14174 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14175 if (rcStrict == VINF_SUCCESS)
14176 iemCtxToCtxCore(pCtxCore, pCtx);
14177 else
14178 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14179 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14180 return rcStrict;
14181}
14182#endif
14183
14184
14185/**
14186 * Macro used by the IEMExec* method to check the given instruction length.
14187 *
14188 * Will return on failure!
14189 *
14190 * @param a_cbInstr The given instruction length.
14191 * @param a_cbMin The minimum length.
14192 */
14193#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14194 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14195 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14196
14197
14198/**
14199 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14200 *
14201 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14202 *
14203 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
14204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14205 * @param rcStrict The status code to fiddle.
14206 */
14207DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14208{
14209 iemUninitExec(pVCpu);
14210#ifdef IN_RC
14211 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14212 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14213#else
14214 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14215#endif
14216}
14217
14218
14219/**
14220 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14221 *
14222 * This API ASSUMES that the caller has already verified that the guest code is
14223 * allowed to access the I/O port. (The I/O port is in the DX register in the
14224 * guest state.)
14225 *
14226 * @returns Strict VBox status code.
14227 * @param pVCpu The cross context virtual CPU structure.
14228 * @param cbValue The size of the I/O port access (1, 2, or 4).
14229 * @param enmAddrMode The addressing mode.
14230 * @param fRepPrefix Indicates whether a repeat prefix is used
14231 * (doesn't matter which for this instruction).
14232 * @param cbInstr The instruction length in bytes.
14233 * @param iEffSeg The effective segment register number.
14234 * @param fIoChecked Whether the access to the I/O port has been
14235 * checked or not. It's typically checked in the
14236 * HM scenario.
14237 */
14238VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14239 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14240{
14241 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14242 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14243
14244 /*
14245 * State init.
14246 */
14247 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14248
14249 /*
14250 * Switch orgy for getting to the right handler.
14251 */
14252 VBOXSTRICTRC rcStrict;
14253 if (fRepPrefix)
14254 {
14255 switch (enmAddrMode)
14256 {
14257 case IEMMODE_16BIT:
14258 switch (cbValue)
14259 {
14260 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14261 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14262 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14263 default:
14264 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14265 }
14266 break;
14267
14268 case IEMMODE_32BIT:
14269 switch (cbValue)
14270 {
14271 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14272 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14273 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14274 default:
14275 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14276 }
14277 break;
14278
14279 case IEMMODE_64BIT:
14280 switch (cbValue)
14281 {
14282 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14283 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14284 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14285 default:
14286 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14287 }
14288 break;
14289
14290 default:
14291 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14292 }
14293 }
14294 else
14295 {
14296 switch (enmAddrMode)
14297 {
14298 case IEMMODE_16BIT:
14299 switch (cbValue)
14300 {
14301 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14302 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14303 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14304 default:
14305 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14306 }
14307 break;
14308
14309 case IEMMODE_32BIT:
14310 switch (cbValue)
14311 {
14312 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14313 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14314 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14315 default:
14316 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14317 }
14318 break;
14319
14320 case IEMMODE_64BIT:
14321 switch (cbValue)
14322 {
14323 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14324 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14325 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14326 default:
14327 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14328 }
14329 break;
14330
14331 default:
14332 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14333 }
14334 }
14335
14336 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14337}
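
/*
 * Illustrative sketch (not part of the original file): how an HM I/O exit
 * handler might hand a REP OUTSB off to IEM.  The exit-specific values
 * (operand size, address mode, segment, instruction length) are assumptions a
 * real caller would take from its decoded exit information.
 */
#if 0 /* example sketch only, not compiled */
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
                                                 1             /*cbValue: byte sized*/,
                                                 IEMMODE_16BIT /*enmAddrMode*/,
                                                 true          /*fRepPrefix*/,
                                                 2             /*cbInstr: assumed REP OUTSB encoding*/,
                                                 X86_SREG_DS   /*iEffSeg*/,
                                                 true          /*fIoChecked*/);
#endif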
14338
14339
14340/**
14341 * Interface for HM and EM for executing string I/O IN (read) instructions.
14342 *
14343 * This API ASSUMES that the caller has already verified that the guest code is
14344 * allowed to access the I/O port. (The I/O port is in the DX register in the
14345 * guest state.)
14346 *
14347 * @returns Strict VBox status code.
14348 * @param pVCpu The cross context virtual CPU structure.
14349 * @param cbValue The size of the I/O port access (1, 2, or 4).
14350 * @param enmAddrMode The addressing mode.
14351 * @param fRepPrefix Indicates whether a repeat prefix is used
14352 * (doesn't matter which for this instruction).
14353 * @param cbInstr The instruction length in bytes.
14354 * @param fIoChecked Whether the access to the I/O port has been
14355 * checked or not. It's typically checked in the
14356 * HM scenario.
14357 */
14358VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14359 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14360{
14361 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14362
14363 /*
14364 * State init.
14365 */
14366 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14367
14368 /*
14369 * Switch orgy for getting to the right handler.
14370 */
14371 VBOXSTRICTRC rcStrict;
14372 if (fRepPrefix)
14373 {
14374 switch (enmAddrMode)
14375 {
14376 case IEMMODE_16BIT:
14377 switch (cbValue)
14378 {
14379 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14380 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14381 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14382 default:
14383 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14384 }
14385 break;
14386
14387 case IEMMODE_32BIT:
14388 switch (cbValue)
14389 {
14390 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14391 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14392 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14393 default:
14394 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14395 }
14396 break;
14397
14398 case IEMMODE_64BIT:
14399 switch (cbValue)
14400 {
14401 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14402 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14403 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14404 default:
14405 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14406 }
14407 break;
14408
14409 default:
14410 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14411 }
14412 }
14413 else
14414 {
14415 switch (enmAddrMode)
14416 {
14417 case IEMMODE_16BIT:
14418 switch (cbValue)
14419 {
14420 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14421 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14422 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14423 default:
14424 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14425 }
14426 break;
14427
14428 case IEMMODE_32BIT:
14429 switch (cbValue)
14430 {
14431 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14432 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14433 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14434 default:
14435 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14436 }
14437 break;
14438
14439 case IEMMODE_64BIT:
14440 switch (cbValue)
14441 {
14442 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14443 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14444 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14445 default:
14446 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14447 }
14448 break;
14449
14450 default:
14451 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14452 }
14453 }
14454
14455 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14456}
14457
14458
14459/**
14460 * Interface for raw-mode to execute an OUT instruction.
14461 *
14462 * @returns Strict VBox status code.
14463 * @param pVCpu The cross context virtual CPU structure.
14464 * @param cbInstr The instruction length in bytes.
14465 * @param u16Port The port to write to.
14466 * @param cbReg The register size.
14467 *
14468 * @remarks In ring-0 not all of the state needs to be synced in.
14469 */
14470VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14471{
14472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14473 Assert(cbReg <= 4 && cbReg != 3);
14474
14475 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14476 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14477 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14478}
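
/*
 * Illustrative sketch (not part of the original file): completing a trapped
 * "OUT imm8, AL" via IEM.  The port number and the 2 byte instruction length
 * are assumptions matching that particular encoding.
 */
#if 0 /* example sketch only, not compiled */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
#endif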
14479
14480
14481/**
14482 * Interface for raw-mode to execute an IN instruction.
14483 *
14484 * @returns Strict VBox status code.
14485 * @param pVCpu The cross context virtual CPU structure.
14486 * @param cbInstr The instruction length in bytes.
14487 * @param u16Port The port to read from.
14488 * @param cbReg The register size.
14489 */
14490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14491{
14492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14493 Assert(cbReg <= 4 && cbReg != 3);
14494
14495 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14496 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14497 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14498}
14499
14500
14501/**
14502 * Interface for HM and EM to write to a CRx register.
14503 *
14504 * @returns Strict VBox status code.
14505 * @param pVCpu The cross context virtual CPU structure.
14506 * @param cbInstr The instruction length in bytes.
14507 * @param iCrReg The control register number (destination).
14508 * @param iGReg The general purpose register number (source).
14509 *
14510 * @remarks In ring-0 not all of the state needs to be synced in.
14511 */
14512VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14513{
14514 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14515 Assert(iCrReg < 16);
14516 Assert(iGReg < 16);
14517
14518 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14519 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14520 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14521}
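
/*
 * Illustrative sketch (not part of the original file): emulating an intercepted
 * "mov cr3, eax".  The 3 byte length matches the plain 0F 22 /r encoding
 * without prefixes; iGReg 0 selects xAX.
 */
#if 0 /* example sketch only, not compiled */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, 0 /*iGReg: xAX*/);
#endif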
14522
14523
14524/**
14525 * Interface for HM and EM to read from a CRx register.
14526 *
14527 * @returns Strict VBox status code.
14528 * @param pVCpu The cross context virtual CPU structure.
14529 * @param cbInstr The instruction length in bytes.
14530 * @param iGReg The general purpose register number (destination).
14531 * @param iCrReg The control register number (source).
14532 *
14533 * @remarks In ring-0 not all of the state needs to be synced in.
14534 */
14535VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14536{
14537 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14538 Assert(iCrReg < 16);
14539 Assert(iGReg < 16);
14540
14541 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14542 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14543 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14544}
14545
14546
14547/**
14548 * Interface for HM and EM to clear the CR0[TS] bit.
14549 *
14550 * @returns Strict VBox status code.
14551 * @param pVCpu The cross context virtual CPU structure.
14552 * @param cbInstr The instruction length in bytes.
14553 *
14554 * @remarks In ring-0 not all of the state needs to be synced in.
14555 */
14556VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14557{
14558 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14559
14560 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14561 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14562 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14563}
14564
14565
14566/**
14567 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14568 *
14569 * @returns Strict VBox status code.
14570 * @param pVCpu The cross context virtual CPU structure.
14571 * @param cbInstr The instruction length in bytes.
14572 * @param uValue The value to load into CR0.
14573 *
14574 * @remarks In ring-0 not all of the state needs to be synced in.
14575 */
14576VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14577{
14578 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14579
14580 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14581 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14582 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14583}
14584
14585
14586/**
14587 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14588 *
14589 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14590 *
14591 * @returns Strict VBox status code.
14592 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14593 * @param cbInstr The instruction length in bytes.
14594 * @remarks In ring-0 not all of the state needs to be synced in.
14595 * @thread EMT(pVCpu)
14596 */
14597VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14598{
14599 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14600
14601 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14602 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14603 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14604}
14605
14606#ifdef IN_RING3
14607
14608/**
14609 * Handles the unlikely and probably fatal merge cases.
14610 *
14611 * @returns Merged status code.
14612 * @param rcStrict Current EM status code.
14613 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14614 * with @a rcStrict.
14615 * @param iMemMap The memory mapping index. For error reporting only.
14616 * @param pVCpu The cross context virtual CPU structure of the calling
14617 * thread, for error reporting only.
14618 */
14619DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14620 unsigned iMemMap, PVMCPU pVCpu)
14621{
14622 if (RT_FAILURE_NP(rcStrict))
14623 return rcStrict;
14624
14625 if (RT_FAILURE_NP(rcStrictCommit))
14626 return rcStrictCommit;
14627
14628 if (rcStrict == rcStrictCommit)
14629 return rcStrictCommit;
14630
14631 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14632 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14633 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14634 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14635 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14636 return VERR_IOM_FF_STATUS_IPE;
14637}
14638
14639
14640/**
14641 * Helper for IOMR3ProcessForceFlag.
14642 *
14643 * @returns Merged status code.
14644 * @param rcStrict Current EM status code.
14645 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14646 * with @a rcStrict.
14647 * @param iMemMap The memory mapping index. For error reporting only.
14648 * @param pVCpu The cross context virtual CPU structure of the calling
14649 * thread, for error reporting only.
14650 */
14651DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14652{
14653 /* Simple. */
14654 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14655 return rcStrictCommit;
14656
14657 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14658 return rcStrict;
14659
14660 /* EM scheduling status codes. */
14661 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14662 && rcStrict <= VINF_EM_LAST))
14663 {
14664 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14665 && rcStrictCommit <= VINF_EM_LAST))
14666 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14667 }
14668
14669 /* Unlikely */
14670 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14671}
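
/*
 * Worked example (added for clarity, not part of the original file): with
 * rcStrict == VINF_SUCCESS the commit status is simply taken over, e.g.
 * iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0, pVCpu) yields
 * VINF_EM_RAW_TO_R3; when both are EM scheduling codes, the numerically
 * smaller (higher priority) one is returned.
 */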
14672
14673
14674/**
14675 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14676 *
14677 * @returns Merge between @a rcStrict and what the commit operation returned.
14678 * @param pVM The cross context VM structure.
14679 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14680 * @param rcStrict The status code returned by ring-0 or raw-mode.
14681 */
14682VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14683{
14684 /*
14685 * Reset the pending commit.
14686 */
14687 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14688 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14689 ("%#x %#x %#x\n",
14690 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14691 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14692
14693 /*
14694 * Commit the pending bounce buffers (usually just one).
14695 */
14696 unsigned cBufs = 0;
14697 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14698 while (iMemMap-- > 0)
14699 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14700 {
14701 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14702 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14703 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14704
14705 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14706 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14707 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14708
14709 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14710 {
14711 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14712 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14713 pbBuf,
14714 cbFirst,
14715 PGMACCESSORIGIN_IEM);
14716 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14717 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14718 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14719 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14720 }
14721
14722 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14723 {
14724 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14725 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14726 pbBuf + cbFirst,
14727 cbSecond,
14728 PGMACCESSORIGIN_IEM);
14729 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14730 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14731 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14732 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14733 }
14734 cBufs++;
14735 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14736 }
14737
14738 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14739 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14740 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14741 pVCpu->iem.s.cActiveMappings = 0;
14742 return rcStrict;
14743}
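
/*
 * Illustrative sketch (not part of the original file): how a ring-3 run loop is
 * expected to react to VMCPU_FF_IEM after returning from ring-0 or raw-mode.
 * The surrounding control flow is an assumption for the example.
 */
#if 0 /* example sketch only, not compiled */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif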
14744
14745#endif /* IN_RING3 */
14746