VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@46175

Last change on this file since 46175 was 46168, checked in by vboxsync, 12 years ago

IEM/EM: Made DSL boot to command line (X doesn't start yet).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 314.9 KB
1/* $Id: IEMAll.cpp 46168 2013-05-19 22:58:37Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pgm.h>
86#include <internal/pgm.h>
87#include <VBox/vmm/iom.h>
88#include <VBox/vmm/em.h>
89#include <VBox/vmm/hm.h>
90#include <VBox/vmm/tm.h>
91#include <VBox/vmm/dbgf.h>
92#ifdef VBOX_WITH_RAW_MODE_NOT_R0
93# include <VBox/vmm/patm.h>
94#endif
95#include "IEMInternal.h"
96#ifdef IEM_VERIFICATION_MODE_FULL
97# include <VBox/vmm/rem.h>
98# include <VBox/vmm/mm.h>
99#endif
100#include <VBox/vmm/vm.h>
101#include <VBox/log.h>
102#include <VBox/err.h>
103#include <VBox/param.h>
104#include <iprt/assert.h>
105#include <iprt/string.h>
106#include <iprt/x86.h>
107
108
109/*******************************************************************************
110* Structures and Typedefs *
111*******************************************************************************/
112/** @typedef PFNIEMOP
113 * Pointer to an opcode decoder function.
114 */
115
116/** @def FNIEMOP_DEF
117 * Define an opcode decoder function.
118 *
119 * We're using macros for this so that adding and removing parameters as well as
120 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL.
121 *
122 * @param a_Name The function name.
123 */
124
125
126#if defined(__GNUC__) && defined(RT_ARCH_X86)
127typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
128# define FNIEMOP_DEF(a_Name) \
129 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
130# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
131 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
132# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
133 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
134
135#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
143
144#elif defined(__GNUC__)
145typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
152
153#else
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
161
162#endif
163
164
165/**
166 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
167 */
168typedef union IEMSELDESC
169{
170 /** The legacy view. */
171 X86DESC Legacy;
172 /** The long mode view. */
173 X86DESC64 Long;
174} IEMSELDESC;
175/** Pointer to a selector descriptor table entry. */
176typedef IEMSELDESC *PIEMSELDESC;
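/* A minimal illustrative sketch of how a fetched descriptor is typically
   inspected through the legacy view (the local variable names here are
   hypothetical; iemMemFetchSelDesc is forward declared further down):

        IEMSELDESC  Desc;
        VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
        if (   rcStrict == VINF_SUCCESS
            && Desc.Legacy.Gen.u1Present)
            Log(("sel=%#x dpl=%u\n", uSel, Desc.Legacy.Gen.u2Dpl));

   The Legacy.Gen bit-field names are assumed from x86.h; the Long view adds
   the upper base dword needed for 64-bit system descriptors. */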
177
178
179/*******************************************************************************
180* Defined Constants And Macros *
181*******************************************************************************/
182/** @name IEM status codes.
183 *
184 * Not quite sure how this will play out in the end, just aliasing safe status
185 * codes for now.
186 *
187 * @{ */
188#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
189/** @} */
190
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
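/* Illustrative use in a fully enumerated switch, so GCC stops warning about an
   uninitialized cbValue (sketch only, the variable name is hypothetical):

        uint8_t cbValue;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT: cbValue = 2; break;
            case IEMMODE_32BIT: cbValue = 4; break;
            case IEMMODE_64BIT: cbValue = 8; break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
 */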
198
199/**
200 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogFunc(a_LoggerArgs); \
224 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
225 } while (0)
226#else
227# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
228 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
229#endif
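/* Illustrative sketch of bailing out of a decoder stub with the logging
   variant (the function name and message are made up for the example):

        FNIEMOP_DEF(iemOp_ExampleNotImplemented)
        {
            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: register form not implemented\n"));
        }
 */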
230
231/**
232 * Call an opcode decoder function.
233 *
234 * We're using macros for this so that adding and removing parameters can be
235 * done as we please. See FNIEMOP_DEF.
236 */
237#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
238
239/**
240 * Call a common opcode decoder function taking one extra argument.
241 *
242 * We're using macros for this so that adding and removing parameters can be
243 * done as we please. See FNIEMOP_DEF_1.
244 */
245#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
246
247/**
248 * Call a common opcode decoder function taking two extra arguments.
249 *
250 * We're using macros for this so that adding and removing parameters can be
251 * done as we please. See FNIEMOP_DEF_2.
252 */
253#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
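/* Illustrative sketch of the define/call pairing; the helper name and the
   ModR/M byte argument are hypothetical:

        FNIEMOP_DEF_1(iemOpCommonExample, uint8_t, bRm)
        {
            NOREF(pIemCpu); NOREF(bRm);
            return VINF_SUCCESS;
        }

   ... which a one-byte opcode decoder would then dispatch to with:

        return FNIEMOP_CALL_1(iemOpCommonExample, bRm);
 */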
254
255/**
256 * Check if we're currently executing in real or virtual 8086 mode.
257 *
258 * @returns @c true if it is, @c false if not.
259 * @param a_pIemCpu The IEM state of the current CPU.
260 */
261#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
262
263/**
264 * Check if we're currently executing in long mode.
265 *
266 * @returns @c true if it is, @c false if not.
267 * @param a_pIemCpu The IEM state of the current CPU.
268 */
269#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
270
271/**
272 * Check if we're currently executing in real mode.
273 *
274 * @returns @c true if it is, @c false if not.
275 * @param a_pIemCpu The IEM state of the current CPU.
276 */
277#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
278
279/**
280 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
281 */
282#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
283
284/**
285 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
286 */
287#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
288
289/**
290 * Tests if at least one of the specified AMD CPUID features (extended) is
291 * marked present.
292 */
293#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
294
295/**
296 * Checks if an Intel CPUID feature is present.
297 */
298#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
299 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
300 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determine this once and store it in the CPU structure */
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determine this once and store it in the CPU structure */
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
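/* The addition folds both canonical ranges into a single unsigned compare:
   valid 48-bit addresses are 0x0000000000000000..0x00007FFFFFFFFFFF and
   0xFFFF800000000000..0xFFFFFFFFFFFFFFFF, and adding 2^47 (0x800000000000)
   maps exactly these onto [0, 2^48).  For example 0xFFFF800000000000 wraps
   around to 0 (canonical), while 0x0000800000000000 becomes 2^48 and fails
   the check. */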
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325static const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334static const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343static const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352static const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361static const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370static const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379static const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390static const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401static const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410static const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419static const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428static const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437static const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446static const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455static const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the IMUL instruction. */
464static const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
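/* Illustrative sketch of how a group 1 opcode (0x80..0x83) picks its worker
   from this table using the ModR/M reg field (variable names hypothetical):

        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
        PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];

   so reg=0 selects ADD and reg=7 selects CMP, matching the table order. */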
484
485/** Function table for the INC instruction. */
486static const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495static const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504static const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513static const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523static const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532static const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559static const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568static const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577static const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587static const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596static const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605static const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction */
623static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction */
631static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
640/** What IEM just wrote. */
641uint8_t g_abIemWrote[256];
642/** How much IEM just wrote. */
643size_t g_cbIemWrote;
644#endif
645
646
647/*******************************************************************************
648* Internal Functions *
649*******************************************************************************/
650static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
651/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
652static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
653static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
654static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
655static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
656static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
657static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
658static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
659static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
660static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
661static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
662static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
663static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
664static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
665static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
666static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
667static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
668static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
669static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
670static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
671static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
672static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
673static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
674
675#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
676static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
677#endif
678static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
679static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
680
681
682/**
683 * Sets the pass up status.
684 *
685 * @returns VINF_SUCCESS.
686 * @param pIemCpu The per CPU IEM state of the calling thread.
687 * @param rcPassUp The pass up status. Must be informational.
688 * VINF_SUCCESS is not allowed.
689 */
690static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
691{
692 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
693
694 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
695 if (rcOldPassUp == VINF_SUCCESS)
696 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
697 /* If both are EM scheduling code, use EM priority rules. */
698 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
699 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
700 {
701 if (rcPassUp < rcOldPassUp)
702 {
703 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
704 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
705 }
706 else
707 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
708 }
709 /* Override EM scheduling with specific status code. */
710 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
711 {
712 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
713 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
714 }
715 /* Don't override specific status code, first come first served. */
716 else
717 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
718 return VINF_SUCCESS;
719}
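/* Illustrative sketch of a typical caller: an instruction implementation that
   gets an informational status back from a VMM API but wants the instruction
   itself reported as having succeeded (the status value is chosen for the
   example only):

        if (rcStrict == VINF_EM_RESCHEDULE)
            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);

   The recorded value is then folded into the status IEM eventually returns. */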
720
721
722/**
723 * Initializes the decoder state.
724 *
725 * @param pIemCpu The per CPU IEM state.
726 * @param fBypassHandlers Whether to bypass access handlers.
727 */
728DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
732
733#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
734 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
735 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
736 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
737 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
738 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
739 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
740 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
741 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
742#endif
743
744#ifdef VBOX_WITH_RAW_MODE_NOT_R0
745 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
746#endif
747 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
748#ifdef IEM_VERIFICATION_MODE_FULL
749 if (pIemCpu->uInjectCpl != UINT8_MAX)
750 pIemCpu->uCpl = pIemCpu->uInjectCpl;
751#endif
752 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
753 ? IEMMODE_64BIT
754 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
755 ? IEMMODE_32BIT
756 : IEMMODE_16BIT;
757 pIemCpu->enmCpuMode = enmMode;
758 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
759 pIemCpu->enmEffAddrMode = enmMode;
760 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
761 pIemCpu->enmEffOpSize = enmMode;
762 pIemCpu->fPrefixes = 0;
763 pIemCpu->uRexReg = 0;
764 pIemCpu->uRexB = 0;
765 pIemCpu->uRexIndex = 0;
766 pIemCpu->iEffSeg = X86_SREG_DS;
767 pIemCpu->offOpcode = 0;
768 pIemCpu->cbOpcode = 0;
769 pIemCpu->cActiveMappings = 0;
770 pIemCpu->iNextMapping = 0;
771 pIemCpu->rcPassUp = VINF_SUCCESS;
772 pIemCpu->fBypassHandlers = fBypassHandlers;
773#ifdef IN_RC
774 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
775 && pCtx->cs.u64Base == 0
776 && pCtx->cs.u32Limit == UINT32_MAX
777 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
778 if (!pIemCpu->fInPatchCode)
779 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
780#endif
781}
782
783
784/**
785 * Prefetches the opcode bytes the first time, i.e. when starting execution.
786 *
787 * @returns Strict VBox status code.
788 * @param pIemCpu The IEM state.
789 * @param fBypassHandlers Whether to bypass access handlers.
790 */
791static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
792{
793#ifdef IEM_VERIFICATION_MODE_FULL
794 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
795#endif
796 iemInitDecoder(pIemCpu, fBypassHandlers);
797
798 /*
799 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
800 *
801 * First translate CS:rIP to a physical address.
802 */
803 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
804 uint32_t cbToTryRead;
805 RTGCPTR GCPtrPC;
806 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
807 {
808 cbToTryRead = PAGE_SIZE;
809 GCPtrPC = pCtx->rip;
810 if (!IEM_IS_CANONICAL(GCPtrPC))
811 return iemRaiseGeneralProtectionFault0(pIemCpu);
812 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
813 }
814 else
815 {
816 uint32_t GCPtrPC32 = pCtx->eip;
817 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
818 if (GCPtrPC32 > pCtx->cs.u32Limit)
819 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
820 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
821 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
822 }
823
824#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
825 /* Allow interpretation of patch manager code blocks since they can for
826 instance throw #PFs for perfectly good reasons. */
827 if (pIemCpu->fInPatchCode)
828 {
829 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
830 if (cbToTryRead > cbLeftOnPage)
831 cbToTryRead = cbLeftOnPage;
832 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
833 cbToTryRead = sizeof(pIemCpu->abOpcode);
834 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
835 pIemCpu->cbOpcode = cbToTryRead;
836 return VINF_SUCCESS;
837 }
838#endif
839
840 RTGCPHYS GCPhys;
841 uint64_t fFlags;
842 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
843 if (RT_FAILURE(rc))
844 {
845 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
846 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
847 }
848 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
849 {
850 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
851 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
852 }
853 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
854 {
855 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
856 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
857 }
858 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
859 /** @todo Check reserved bits and such stuff. PGM is better at doing
860 * that, so do it when implementing the guest virtual address
861 * TLB... */
862
863#ifdef IEM_VERIFICATION_MODE_FULL
864 /*
865 * Optimistic optimization: Use unconsumed opcode bytes from the previous
866 * instruction.
867 */
868 /** @todo optimize this differently by not using PGMPhysRead. */
869 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
870 pIemCpu->GCPhysOpcodes = GCPhys;
871 if ( offPrevOpcodes < cbOldOpcodes
872 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
873 {
874 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
875 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
876 pIemCpu->cbOpcode = cbNew;
877 return VINF_SUCCESS;
878 }
879#endif
880
881 /*
882 * Read the bytes at this address.
883 */
884 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
885 if (cbToTryRead > cbLeftOnPage)
886 cbToTryRead = cbLeftOnPage;
887 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
888 cbToTryRead = sizeof(pIemCpu->abOpcode);
889 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
890 * doing that. */
891 if (!pIemCpu->fBypassHandlers)
892 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
893 else
894 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
895 if (rc != VINF_SUCCESS)
896 {
897 /** @todo status code handling */
898 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
899 GCPtrPC, GCPhys, cbToTryRead, rc));
900 return rc;
901 }
902 pIemCpu->cbOpcode = cbToTryRead;
903
904 return VINF_SUCCESS;
905}
906
907
908/**
909 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
910 * exception if it fails.
911 *
912 * @returns Strict VBox status code.
913 * @param pIemCpu The IEM state.
914 * @param cbMin The minimum number of opcode bytes to fetch.
915 */
916static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
917{
918 /*
919 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
920 *
921 * First translate CS:rIP to a physical address.
922 */
923 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
924 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
925 uint32_t cbToTryRead;
926 RTGCPTR GCPtrNext;
927 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
928 {
929 cbToTryRead = PAGE_SIZE;
930 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
931 if (!IEM_IS_CANONICAL(GCPtrNext))
932 return iemRaiseGeneralProtectionFault0(pIemCpu);
933 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
934 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
935 }
936 else
937 {
938 uint32_t GCPtrNext32 = pCtx->eip;
939 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
940 GCPtrNext32 += pIemCpu->cbOpcode;
941 if (GCPtrNext32 > pCtx->cs.u32Limit)
942 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
943 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
944 if (cbToTryRead < cbMin - cbLeft)
945 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
946 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
947 }
948
949 RTGCPHYS GCPhys;
950 uint64_t fFlags;
951 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
952 if (RT_FAILURE(rc))
953 {
954 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
955 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
956 }
957 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
958 {
959 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
960 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
965 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
968 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
969 /** @todo Check reserved bits and such stuff. PGM is better at doing
970 * that, so do it when implementing the guest virtual address
971 * TLB... */
972
973 /*
974 * Read the bytes at this address.
975 */
976 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
977 if (cbToTryRead > cbLeftOnPage)
978 cbToTryRead = cbLeftOnPage;
979 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
980 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
981 Assert(cbToTryRead >= cbMin - cbLeft);
982 if (!pIemCpu->fBypassHandlers)
983 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
984 else
985 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
986 if (rc != VINF_SUCCESS)
987 {
988 /** @todo status code handling */
989 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
990 return rc;
991 }
992 pIemCpu->cbOpcode += cbToTryRead;
993 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
994
995 return VINF_SUCCESS;
996}
997
998
999/**
1000 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1001 *
1002 * @returns Strict VBox status code.
1003 * @param pIemCpu The IEM state.
1004 * @param pb Where to return the opcode byte.
1005 */
1006DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1007{
1008 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1009 if (rcStrict == VINF_SUCCESS)
1010 {
1011 uint8_t offOpcode = pIemCpu->offOpcode;
1012 *pb = pIemCpu->abOpcode[offOpcode];
1013 pIemCpu->offOpcode = offOpcode + 1;
1014 }
1015 else
1016 *pb = 0;
1017 return rcStrict;
1018}
1019
1020
1021/**
1022 * Fetches the next opcode byte.
1023 *
1024 * @returns Strict VBox status code.
1025 * @param pIemCpu The IEM state.
1026 * @param pu8 Where to return the opcode byte.
1027 */
1028DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1029{
1030 uint8_t const offOpcode = pIemCpu->offOpcode;
1031 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1032 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1033
1034 *pu8 = pIemCpu->abOpcode[offOpcode];
1035 pIemCpu->offOpcode = offOpcode + 1;
1036 return VINF_SUCCESS;
1037}
1038
1039
1040/**
1041 * Fetches the next opcode byte, returns automatically on failure.
1042 *
1043 * @param a_pu8 Where to return the opcode byte.
1044 * @remark Implicitly references pIemCpu.
1045 */
1046#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1047 do \
1048 { \
1049 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1050 if (rcStrict2 != VINF_SUCCESS) \
1051 return rcStrict2; \
1052 } while (0)
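/* Illustrative sketch: the macro keeps decoder bodies linear by returning from
   the caller on fetch failure, implicitly using the local pIemCpu (the opcode
   name and helper here are made up for the example):

        FNIEMOP_DEF(iemOp_Example)
        {
            uint8_t bRm;
            IEM_OPCODE_GET_NEXT_U8(&bRm);
            return FNIEMOP_CALL_1(iemOpCommonExample, bRm);
        }
 */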
1053
1054
1055/**
1056 * Fetches the next signed byte from the opcode stream.
1057 *
1058 * @returns Strict VBox status code.
1059 * @param pIemCpu The IEM state.
1060 * @param pi8 Where to return the signed byte.
1061 */
1062DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1063{
1064 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1065}
1066
1067
1068/**
1069 * Fetches the next signed byte from the opcode stream, returning automatically
1070 * on failure.
1071 *
1072 * @param pi8 Where to return the signed byte.
1073 * @remark Implicitly references pIemCpu.
1074 */
1075#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1076 do \
1077 { \
1078 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1079 if (rcStrict2 != VINF_SUCCESS) \
1080 return rcStrict2; \
1081 } while (0)
1082
1083
1084/**
1085 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1086 *
1087 * @returns Strict VBox status code.
1088 * @param pIemCpu The IEM state.
1089 * @param pu16 Where to return the opcode word.
1090 */
1091DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1092{
1093 uint8_t u8;
1094 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1095 if (rcStrict == VINF_SUCCESS)
1096 *pu16 = (int8_t)u8;
1097 return rcStrict;
1098}
1099
1100
1101/**
1102 * Fetches the next signed byte from the opcode stream, extending it to
1103 * unsigned 16-bit.
1104 *
1105 * @returns Strict VBox status code.
1106 * @param pIemCpu The IEM state.
1107 * @param pu16 Where to return the unsigned word.
1108 */
1109DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1110{
1111 uint8_t const offOpcode = pIemCpu->offOpcode;
1112 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1113 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1114
1115 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1116 pIemCpu->offOpcode = offOpcode + 1;
1117 return VINF_SUCCESS;
1118}
1119
1120
1121/**
1122 * Fetches the next signed byte from the opcode stream and sign-extends it to
1123 * a word, returning automatically on failure.
1124 *
1125 * @param pu16 Where to return the word.
1126 * @remark Implicitly references pIemCpu.
1127 */
1128#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1129 do \
1130 { \
1131 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1132 if (rcStrict2 != VINF_SUCCESS) \
1133 return rcStrict2; \
1134 } while (0)
1135
1136
1137/**
1138 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1139 *
1140 * @returns Strict VBox status code.
1141 * @param pIemCpu The IEM state.
1142 * @param pu32 Where to return the opcode dword.
1143 */
1144DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1145{
1146 uint8_t u8;
1147 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1148 if (rcStrict == VINF_SUCCESS)
1149 *pu32 = (int8_t)u8;
1150 return rcStrict;
1151}
1152
1153
1154/**
1155 * Fetches the next signed byte from the opcode stream, extending it to
1156 * unsigned 32-bit.
1157 *
1158 * @returns Strict VBox status code.
1159 * @param pIemCpu The IEM state.
1160 * @param pu32 Where to return the unsigned dword.
1161 */
1162DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1163{
1164 uint8_t const offOpcode = pIemCpu->offOpcode;
1165 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1166 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1167
1168 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1169 pIemCpu->offOpcode = offOpcode + 1;
1170 return VINF_SUCCESS;
1171}
1172
1173
1174/**
1175 * Fetches the next signed byte from the opcode stream and sign-extends it to
1176 * a double word, returning automatically on failure.
1177 *
1178 * @param pu32 Where to return the double word.
1179 * @remark Implicitly references pIemCpu.
1180 */
1181#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1182 do \
1183 { \
1184 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1185 if (rcStrict2 != VINF_SUCCESS) \
1186 return rcStrict2; \
1187 } while (0)
1188
1189
1190/**
1191 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1192 *
1193 * @returns Strict VBox status code.
1194 * @param pIemCpu The IEM state.
1195 * @param pu64 Where to return the opcode qword.
1196 */
1197DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1198{
1199 uint8_t u8;
1200 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1201 if (rcStrict == VINF_SUCCESS)
1202 *pu64 = (int8_t)u8;
1203 return rcStrict;
1204}
1205
1206
1207/**
1208 * Fetches the next signed byte from the opcode stream, extending it to
1209 * unsigned 64-bit.
1210 *
1211 * @returns Strict VBox status code.
1212 * @param pIemCpu The IEM state.
1213 * @param pu64 Where to return the unsigned qword.
1214 */
1215DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1216{
1217 uint8_t const offOpcode = pIemCpu->offOpcode;
1218 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1219 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1220
1221 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1222 pIemCpu->offOpcode = offOpcode + 1;
1223 return VINF_SUCCESS;
1224}
1225
1226
1227/**
1228 * Fetches the next signed byte from the opcode stream and sign-extends it to
1229 * a quad word, returning automatically on failure.
1230 *
1231 * @param pu64 Where to return the quad word.
1232 * @remark Implicitly references pIemCpu.
1233 */
1234#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1235 do \
1236 { \
1237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1238 if (rcStrict2 != VINF_SUCCESS) \
1239 return rcStrict2; \
1240 } while (0)
1241
1242
1243/**
1244 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1245 *
1246 * @returns Strict VBox status code.
1247 * @param pIemCpu The IEM state.
1248 * @param pu16 Where to return the opcode word.
1249 */
1250DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1251{
1252 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1253 if (rcStrict == VINF_SUCCESS)
1254 {
1255 uint8_t offOpcode = pIemCpu->offOpcode;
1256 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1257 pIemCpu->offOpcode = offOpcode + 2;
1258 }
1259 else
1260 *pu16 = 0;
1261 return rcStrict;
1262}
1263
1264
1265/**
1266 * Fetches the next opcode word.
1267 *
1268 * @returns Strict VBox status code.
1269 * @param pIemCpu The IEM state.
1270 * @param pu16 Where to return the opcode word.
1271 */
1272DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1273{
1274 uint8_t const offOpcode = pIemCpu->offOpcode;
1275 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1276 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1277
1278 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1279 pIemCpu->offOpcode = offOpcode + 2;
1280 return VINF_SUCCESS;
1281}
1282
1283
1284/**
1285 * Fetches the next opcode word, returns automatically on failure.
1286 *
1287 * @param a_pu16 Where to return the opcode word.
1288 * @remark Implicitly references pIemCpu.
1289 */
1290#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1291 do \
1292 { \
1293 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1294 if (rcStrict2 != VINF_SUCCESS) \
1295 return rcStrict2; \
1296 } while (0)
1297
1298
1299/**
1300 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1301 *
1302 * @returns Strict VBox status code.
1303 * @param pIemCpu The IEM state.
1304 * @param pu32 Where to return the opcode double word.
1305 */
1306DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1307{
1308 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1309 if (rcStrict == VINF_SUCCESS)
1310 {
1311 uint8_t offOpcode = pIemCpu->offOpcode;
1312 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1313 pIemCpu->offOpcode = offOpcode + 2;
1314 }
1315 else
1316 *pu32 = 0;
1317 return rcStrict;
1318}
1319
1320
1321/**
1322 * Fetches the next opcode word, zero extending it to a double word.
1323 *
1324 * @returns Strict VBox status code.
1325 * @param pIemCpu The IEM state.
1326 * @param pu32 Where to return the opcode double word.
1327 */
1328DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1329{
1330 uint8_t const offOpcode = pIemCpu->offOpcode;
1331 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1332 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1333
1334 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1335 pIemCpu->offOpcode = offOpcode + 2;
1336 return VINF_SUCCESS;
1337}
1338
1339
1340/**
1341 * Fetches the next opcode word and zero extends it to a double word, returns
1342 * automatically on failure.
1343 *
1344 * @param a_pu32 Where to return the opcode double word.
1345 * @remark Implicitly references pIemCpu.
1346 */
1347#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1348 do \
1349 { \
1350 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1351 if (rcStrict2 != VINF_SUCCESS) \
1352 return rcStrict2; \
1353 } while (0)
1354
1355
1356/**
1357 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pIemCpu The IEM state.
1361 * @param pu64 Where to return the opcode quad word.
1362 */
1363DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1364{
1365 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1366 if (rcStrict == VINF_SUCCESS)
1367 {
1368 uint8_t offOpcode = pIemCpu->offOpcode;
1369 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1370 pIemCpu->offOpcode = offOpcode + 2;
1371 }
1372 else
1373 *pu64 = 0;
1374 return rcStrict;
1375}
1376
1377
1378/**
1379 * Fetches the next opcode word, zero extending it to a quad word.
1380 *
1381 * @returns Strict VBox status code.
1382 * @param pIemCpu The IEM state.
1383 * @param pu64 Where to return the opcode quad word.
1384 */
1385DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1386{
1387 uint8_t const offOpcode = pIemCpu->offOpcode;
1388 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1389 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1390
1391 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1392 pIemCpu->offOpcode = offOpcode + 2;
1393 return VINF_SUCCESS;
1394}
1395
1396
1397/**
1398 * Fetches the next opcode word and zero extends it to a quad word, returns
1399 * automatically on failure.
1400 *
1401 * @param a_pu64 Where to return the opcode quad word.
1402 * @remark Implicitly references pIemCpu.
1403 */
1404#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1405 do \
1406 { \
1407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1408 if (rcStrict2 != VINF_SUCCESS) \
1409 return rcStrict2; \
1410 } while (0)
1411
1412
1413/**
1414 * Fetches the next signed word from the opcode stream.
1415 *
1416 * @returns Strict VBox status code.
1417 * @param pIemCpu The IEM state.
1418 * @param pi16 Where to return the signed word.
1419 */
1420DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1421{
1422 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1423}
1424
1425
1426/**
1427 * Fetches the next signed word from the opcode stream, returning automatically
1428 * on failure.
1429 *
1430 * @param pi16 Where to return the signed word.
1431 * @remark Implicitly references pIemCpu.
1432 */
1433#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1434 do \
1435 { \
1436 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1437 if (rcStrict2 != VINF_SUCCESS) \
1438 return rcStrict2; \
1439 } while (0)
1440
1441
1442/**
1443 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pIemCpu The IEM state.
1447 * @param pu32 Where to return the opcode dword.
1448 */
1449DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1450{
1451 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1452 if (rcStrict == VINF_SUCCESS)
1453 {
1454 uint8_t offOpcode = pIemCpu->offOpcode;
1455 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1456 pIemCpu->abOpcode[offOpcode + 1],
1457 pIemCpu->abOpcode[offOpcode + 2],
1458 pIemCpu->abOpcode[offOpcode + 3]);
1459 pIemCpu->offOpcode = offOpcode + 4;
1460 }
1461 else
1462 *pu32 = 0;
1463 return rcStrict;
1464}
1465
1466
1467/**
1468 * Fetches the next opcode dword.
1469 *
1470 * @returns Strict VBox status code.
1471 * @param pIemCpu The IEM state.
1472 * @param pu32 Where to return the opcode double word.
1473 */
1474DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1475{
1476 uint8_t const offOpcode = pIemCpu->offOpcode;
1477 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1478 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1479
1480 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1481 pIemCpu->abOpcode[offOpcode + 1],
1482 pIemCpu->abOpcode[offOpcode + 2],
1483 pIemCpu->abOpcode[offOpcode + 3]);
1484 pIemCpu->offOpcode = offOpcode + 4;
1485 return VINF_SUCCESS;
1486}
1487
1488
1489/**
1490 * Fetches the next opcode dword, returns automatically on failure.
1491 *
1492 * @param a_pu32 Where to return the opcode dword.
1493 * @remark Implicitly references pIemCpu.
1494 */
1495#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1496 do \
1497 { \
1498 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1499 if (rcStrict2 != VINF_SUCCESS) \
1500 return rcStrict2; \
1501 } while (0)
1502
1503
1504/**
1505 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1506 *
1507 * @returns Strict VBox status code.
1508 * @param pIemCpu The IEM state.
1509 * @param pu64 Where to return the opcode qword.
1510 */
1511DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1512{
1513 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1514 if (rcStrict == VINF_SUCCESS)
1515 {
1516 uint8_t offOpcode = pIemCpu->offOpcode;
1517 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1518 pIemCpu->abOpcode[offOpcode + 1],
1519 pIemCpu->abOpcode[offOpcode + 2],
1520 pIemCpu->abOpcode[offOpcode + 3]);
1521 pIemCpu->offOpcode = offOpcode + 4;
1522 }
1523 else
1524 *pu64 = 0;
1525 return rcStrict;
1526}
1527
1528
1529/**
1530 * Fetches the next opcode dword, zero extending it to a quad word.
1531 *
1532 * @returns Strict VBox status code.
1533 * @param pIemCpu The IEM state.
1534 * @param pu64 Where to return the opcode quad word.
1535 */
1536DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1537{
1538 uint8_t const offOpcode = pIemCpu->offOpcode;
1539 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1540 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1541
1542 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1543 pIemCpu->abOpcode[offOpcode + 1],
1544 pIemCpu->abOpcode[offOpcode + 2],
1545 pIemCpu->abOpcode[offOpcode + 3]);
1546 pIemCpu->offOpcode = offOpcode + 4;
1547 return VINF_SUCCESS;
1548}
1549
1550
1551/**
1552 * Fetches the next opcode dword and zero extends it to a quad word, returns
1553 * automatically on failure.
1554 *
1555 * @param a_pu64 Where to return the opcode quad word.
1556 * @remark Implicitly references pIemCpu.
1557 */
1558#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1559 do \
1560 { \
1561 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1562 if (rcStrict2 != VINF_SUCCESS) \
1563 return rcStrict2; \
1564 } while (0)
1565
1566
1567/**
1568 * Fetches the next signed double word from the opcode stream.
1569 *
1570 * @returns Strict VBox status code.
1571 * @param pIemCpu The IEM state.
1572 * @param pi32 Where to return the signed double word.
1573 */
1574DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1575{
1576 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1577}
1578
1579/**
1580 * Fetches the next signed double word from the opcode stream, returning
1581 * automatically on failure.
1582 *
1583 * @param pi32 Where to return the signed double word.
1584 * @remark Implicitly references pIemCpu.
1585 */
1586#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1587 do \
1588 { \
1589 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1590 if (rcStrict2 != VINF_SUCCESS) \
1591 return rcStrict2; \
1592 } while (0)
1593
1594
1595/**
1596 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1597 *
1598 * @returns Strict VBox status code.
1599 * @param pIemCpu The IEM state.
1600 * @param pu64 Where to return the opcode qword.
1601 */
1602DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1603{
1604 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1605 if (rcStrict == VINF_SUCCESS)
1606 {
1607 uint8_t offOpcode = pIemCpu->offOpcode;
1608 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1609 pIemCpu->abOpcode[offOpcode + 1],
1610 pIemCpu->abOpcode[offOpcode + 2],
1611 pIemCpu->abOpcode[offOpcode + 3]);
1612 pIemCpu->offOpcode = offOpcode + 4;
1613 }
1614 else
1615 *pu64 = 0;
1616 return rcStrict;
1617}
1618
1619
1620/**
1621 * Fetches the next opcode dword, sign extending it into a quad word.
1622 *
1623 * @returns Strict VBox status code.
1624 * @param pIemCpu The IEM state.
1625 * @param pu64 Where to return the opcode quad word.
1626 */
1627DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1628{
1629 uint8_t const offOpcode = pIemCpu->offOpcode;
1630 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1631 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1632
1633 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1634 pIemCpu->abOpcode[offOpcode + 1],
1635 pIemCpu->abOpcode[offOpcode + 2],
1636 pIemCpu->abOpcode[offOpcode + 3]);
1637 *pu64 = i32;
1638 pIemCpu->offOpcode = offOpcode + 4;
1639 return VINF_SUCCESS;
1640}
1641
1642
1643/**
1644 * Fetches the next opcode double word and sign extends it to a quad word,
1645 * returns automatically on failure.
1646 *
1647 * @param a_pu64 Where to return the opcode quad word.
1648 * @remark Implicitly references pIemCpu.
1649 */
1650#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1651 do \
1652 { \
1653 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1654 if (rcStrict2 != VINF_SUCCESS) \
1655 return rcStrict2; \
1656 } while (0)
1657
1658
1659/**
1660 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1661 *
1662 * @returns Strict VBox status code.
1663 * @param pIemCpu The IEM state.
1664 * @param pu64 Where to return the opcode qword.
1665 */
1666DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1667{
1668 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1669 if (rcStrict == VINF_SUCCESS)
1670 {
1671 uint8_t offOpcode = pIemCpu->offOpcode;
1672 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1673 pIemCpu->abOpcode[offOpcode + 1],
1674 pIemCpu->abOpcode[offOpcode + 2],
1675 pIemCpu->abOpcode[offOpcode + 3],
1676 pIemCpu->abOpcode[offOpcode + 4],
1677 pIemCpu->abOpcode[offOpcode + 5],
1678 pIemCpu->abOpcode[offOpcode + 6],
1679 pIemCpu->abOpcode[offOpcode + 7]);
1680 pIemCpu->offOpcode = offOpcode + 8;
1681 }
1682 else
1683 *pu64 = 0;
1684 return rcStrict;
1685}
1686
1687
1688/**
1689 * Fetches the next opcode qword.
1690 *
1691 * @returns Strict VBox status code.
1692 * @param pIemCpu The IEM state.
1693 * @param pu64 Where to return the opcode qword.
1694 */
1695DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1696{
1697 uint8_t const offOpcode = pIemCpu->offOpcode;
1698 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1699 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1700
1701 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1702 pIemCpu->abOpcode[offOpcode + 1],
1703 pIemCpu->abOpcode[offOpcode + 2],
1704 pIemCpu->abOpcode[offOpcode + 3],
1705 pIemCpu->abOpcode[offOpcode + 4],
1706 pIemCpu->abOpcode[offOpcode + 5],
1707 pIemCpu->abOpcode[offOpcode + 6],
1708 pIemCpu->abOpcode[offOpcode + 7]);
1709 pIemCpu->offOpcode = offOpcode + 8;
1710 return VINF_SUCCESS;
1711}
1712
1713
1714/**
1715 * Fetches the next opcode quad word, returns automatically on failure.
1716 *
1717 * @param a_pu64 Where to return the opcode quad word.
1718 * @remark Implicitly references pIemCpu.
1719 */
1720#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1721 do \
1722 { \
1723 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1724 if (rcStrict2 != VINF_SUCCESS) \
1725 return rcStrict2; \
1726 } while (0)
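
/*
 * Usage sketch (illustrative only, not part of the build): an opcode decoder
 * body typically pulls its immediates through these macros and lets them bail
 * out with the strict status code when the bytes cannot be fetched. The
 * variable name below is made up for illustration:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *     // ... feed u64Imm to the IEM_MC_* statements or a C implementation ...
 */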
1727
1728
1729/** @name Misc Worker Functions.
1730 * @{
1731 */
1732
1733
1734/**
1735 * Validates a new SS segment.
1736 *
1737 * @returns VBox strict status code.
1738 * @param pIemCpu The IEM per CPU instance data.
1739 * @param pCtx The CPU context.
1740 * @param NewSS The new SS selector.
1741 * @param uCpl The CPL to load the stack for.
1742 * @param pDesc Where to return the descriptor.
1743 */
1744static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1745{
1746 NOREF(pCtx);
1747
1748 /* Null selectors are not allowed (we're not called for dispatching
1749 interrupts with SS=0 in long mode). */
1750 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1751 {
1752 Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #GP(0)\n", NewSS));
1753 return iemRaiseGeneralProtectionFault0(pIemCpu);
1754 }
1755
1756 /*
1757 * Read the descriptor.
1758 */
1759 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1760 if (rcStrict != VINF_SUCCESS)
1761 return rcStrict;
1762
1763 /*
1764 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1765 */
1766 if (!pDesc->Legacy.Gen.u1DescType)
1767 {
1768 Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1769 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1770 }
1771
1772 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1773 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1774 {
1775 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1776 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1777 }
1778 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1779 if ((NewSS & X86_SEL_RPL) != uCpl)
1780 {
1781 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1782 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1783 }
1784 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1785 {
1786 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1787 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1788 }
1789
1790 /* Is it there? */
1791 /** @todo testcase: Is this checked before the canonical / limit check below? */
1792 if (!pDesc->Legacy.Gen.u1Present)
1793 {
1794 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1795 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1796 }
1797
1798 return VINF_SUCCESS;
1799}
1800
1801
1802/**
1803 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1804 * not.
1805 *
1806 * @param a_pIemCpu The IEM per CPU data.
1807 * @param a_pCtx The CPU context.
1808 */
1809#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1810# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1811 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1812 ? (a_pCtx)->eflags.u \
1813 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1814#else
1815# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1816 ( (a_pCtx)->eflags.u )
1817#endif
1818
1819/**
1820 * Updates the EFLAGS in the correct manner wrt. PATM.
1821 *
1822 * @param a_pIemCpu The IEM per CPU data.
1823 * @param a_pCtx The CPU context.
1824 */
1825#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1826# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1827 do { \
1828 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1829 (a_pCtx)->eflags.u = (a_fEfl); \
1830 else \
1831 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1832 } while (0)
1833#else
1834# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1835 do { \
1836 (a_pCtx)->eflags.u = (a_fEfl); \
1837 } while (0)
1838#endif
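
/*
 * Usage sketch (mirrors the exception dispatchers further down): always go
 * through the getter/setter pair when touching EFLAGS, so the PATM-managed
 * bits are handled correctly in raw-mode:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */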
1839
1840
1841/** @} */
1842
1843/** @name Raising Exceptions.
1844 *
1845 * @{
1846 */
1847
1848/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1849 * @{ */
1850/** CPU exception. */
1851#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1852/** External interrupt (from PIC, APIC, whatever). */
1853#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1854/** Software interrupt (int, into or bound). */
1855#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1856/** Takes an error code. */
1857#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1858/** Takes a CR2. */
1859#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1860/** Generated by the breakpoint instruction. */
1861#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1862/** @} */
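
/*
 * Sketch of the flag combinations used by the raisers further down:
 *   - #DE, #DB, #UD, #NM, #MF, #AC: IEM_XCPT_FLAGS_T_CPU_XCPT only.
 *   - #TS, #NP, #SS, #GP:           IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR.
 *   - #PF:                          IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *                                   | IEM_XCPT_FLAGS_CR2.
 * External and software interrupts pass IEM_XCPT_FLAGS_T_EXT_INT and
 * IEM_XCPT_FLAGS_T_SOFT_INT respectively.
 */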
1863
1864/**
1865 * Loads the specified stack far pointer from the TSS.
1866 *
1867 * @returns VBox strict status code.
1868 * @param pIemCpu The IEM per CPU instance data.
1869 * @param pCtx The CPU context.
1870 * @param uCpl The CPL to load the stack for.
1871 * @param pSelSS Where to return the new stack segment.
1872 * @param puEsp Where to return the new stack pointer.
1873 */
1874static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1875 PRTSEL pSelSS, uint32_t *puEsp)
1876{
1877 VBOXSTRICTRC rcStrict;
1878 Assert(uCpl < 4);
1879 *puEsp = 0; /* make gcc happy */
1880 *pSelSS = 0; /* make gcc happy */
1881
1882 switch (pCtx->tr.Attr.n.u4Type)
1883 {
1884 /*
1885 * 16-bit TSS (X86TSS16).
1886 */
1887 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1888 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1889 {
1890 uint32_t off = uCpl * 4 + 2;
1891 if (off + 4 > pCtx->tr.u32Limit)
1892 {
1893 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1894 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1895 }
1896
1897 uint32_t u32Tmp = 0; /* gcc maybe... */
1898 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1899 if (rcStrict == VINF_SUCCESS)
1900 {
1901 *puEsp = RT_LOWORD(u32Tmp);
1902 *pSelSS = RT_HIWORD(u32Tmp);
1903 return VINF_SUCCESS;
1904 }
1905 break;
1906 }
1907
1908 /*
1909 * 32-bit TSS (X86TSS32).
1910 */
1911 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1912 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1913 {
1914 uint32_t off = uCpl * 8 + 4;
1915 if (off + 7 > pCtx->tr.u32Limit)
1916 {
1917 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1918 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1919 }
1920
1921 uint64_t u64Tmp;
1922 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1923 if (rcStrict == VINF_SUCCESS)
1924 {
1925 *puEsp = u64Tmp & UINT32_MAX;
1926 *pSelSS = (RTSEL)(u64Tmp >> 32);
1927 return VINF_SUCCESS;
1928 }
1929 break;
1930 }
1931
1932 default:
1933 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1934 }
1935 return rcStrict;
1936}
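
/*
 * Offset math sketch for the lookups above (derived from the code; see the
 * Intel/AMD manuals for the authoritative TSS layouts):
 *   - 16-bit TSS: the {SP, SS} pair for ring N is two words at offset N*4 + 2.
 *   - 32-bit TSS: the {ESP, SS} pair for ring N is eight bytes at offset N*8 + 4,
 *     with ESP in the low dword and SS in the low word of the high dword.
 */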
1937
1938
1939/**
1940 * Adjust the CPU state according to the exception being raised.
1941 *
1942 * @param pCtx The CPU context.
1943 * @param u8Vector The exception that has been raised.
1944 */
1945DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1946{
1947 switch (u8Vector)
1948 {
1949 case X86_XCPT_DB:
1950 pCtx->dr[7] &= ~X86_DR7_GD;
1951 break;
1952 /** @todo Read the AMD and Intel exception reference... */
1953 }
1954}
1955
1956
1957/**
1958 * Implements exceptions and interrupts for real mode.
1959 *
1960 * @returns VBox strict status code.
1961 * @param pIemCpu The IEM per CPU instance data.
1962 * @param pCtx The CPU context.
1963 * @param cbInstr The number of bytes to offset rIP by in the return
1964 * address.
1965 * @param u8Vector The interrupt / exception vector number.
1966 * @param fFlags The flags.
1967 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1968 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1969 */
1970static VBOXSTRICTRC
1971iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1972 PCPUMCTX pCtx,
1973 uint8_t cbInstr,
1974 uint8_t u8Vector,
1975 uint32_t fFlags,
1976 uint16_t uErr,
1977 uint64_t uCr2)
1978{
1979 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1980 NOREF(uErr); NOREF(uCr2);
1981
1982 /*
1983 * Read the IDT entry.
1984 */
1985 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1986 {
1987 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1988 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1989 }
1990 RTFAR16 Idte;
1991 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1992 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1993 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1994 return rcStrict;
1995
1996 /*
1997 * Push the stack frame.
1998 */
1999 uint16_t *pu16Frame;
2000 uint64_t uNewRsp;
2001 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2002 if (rcStrict != VINF_SUCCESS)
2003 return rcStrict;
2004
2005 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2006 pu16Frame[2] = (uint16_t)fEfl;
2007 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2008 pu16Frame[0] = pCtx->ip + cbInstr;
2009 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2010 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2011 return rcStrict;
2012
2013 /*
2014 * Load the vector address into cs:ip and make exception specific state
2015 * adjustments.
2016 */
2017 pCtx->cs.Sel = Idte.sel;
2018 pCtx->cs.ValidSel = Idte.sel;
2019 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2020 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2021 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2022 pCtx->rip = Idte.off;
2023 fEfl &= ~X86_EFL_IF;
2024 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2025
2026 /** @todo do we actually do this in real mode? */
2027 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2028 iemRaiseXcptAdjustState(pCtx, u8Vector);
2029
2030 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2031}
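
/*
 * Real-mode dispatch sketch (summarizing the code above): the IVT entry for
 * vector N is the 4-byte far pointer at IDTR.base + N*4, and the 6-byte stack
 * frame is, from low to high address: return IP, CS, FLAGS. The handler is
 * then entered at the IVT offset with CS.base = selector * 16 and IF cleared.
 */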
2032
2033
2034/**
2035 * Implements exceptions and interrupts for protected mode.
2036 *
2037 * @returns VBox strict status code.
2038 * @param pIemCpu The IEM per CPU instance data.
2039 * @param pCtx The CPU context.
2040 * @param cbInstr The number of bytes to offset rIP by in the return
2041 * address.
2042 * @param u8Vector The interrupt / exception vector number.
2043 * @param fFlags The flags.
2044 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2045 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2046 */
2047static VBOXSTRICTRC
2048iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2049 PCPUMCTX pCtx,
2050 uint8_t cbInstr,
2051 uint8_t u8Vector,
2052 uint32_t fFlags,
2053 uint16_t uErr,
2054 uint64_t uCr2)
2055{
2056 NOREF(cbInstr);
2057
2058 /*
2059 * Read the IDT entry.
2060 */
2061 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2062 {
2063 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2064 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2065 }
2066 X86DESC Idte;
2067 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2068 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2069 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2070 return rcStrict;
2071 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2072 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2073 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2074
2075 /*
2076 * Check the descriptor type, DPL and such.
2077 * ASSUMES this is done in the same order as described for call-gate calls.
2078 */
2079 if (Idte.Gate.u1DescType)
2080 {
2081 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2082 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2083 }
2084 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2085 switch (Idte.Gate.u4Type)
2086 {
2087 case X86_SEL_TYPE_SYS_UNDEFINED:
2088 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2089 case X86_SEL_TYPE_SYS_LDT:
2090 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2091 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2092 case X86_SEL_TYPE_SYS_UNDEFINED2:
2093 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2094 case X86_SEL_TYPE_SYS_UNDEFINED3:
2095 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2096 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2097 case X86_SEL_TYPE_SYS_UNDEFINED4:
2098 {
2099 /** @todo check what actually happens when the type is wrong...
2100 * esp. call gates. */
2101 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2102 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2103 }
2104
2105 case X86_SEL_TYPE_SYS_286_INT_GATE:
2106 case X86_SEL_TYPE_SYS_386_INT_GATE:
2107 fEflToClear |= X86_EFL_IF;
2108 break;
2109
2110 case X86_SEL_TYPE_SYS_TASK_GATE:
2111 /** @todo task gates. */
2112 AssertFailedReturn(VERR_NOT_SUPPORTED);
2113
2114 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2115 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2116 break;
2117
2118 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2119 }
2120
2121 /* Check DPL against CPL if applicable. */
2122 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2123 {
2124 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2125 {
2126 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2127 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2128 }
2129 }
2130
2131 /* Is it there? */
2132 if (!Idte.Gate.u1Present)
2133 {
2134 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2135 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2136 }
2137
2138 /* A null CS is bad. */
2139 RTSEL NewCS = Idte.Gate.u16Sel;
2140 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2141 {
2142 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2143 return iemRaiseGeneralProtectionFault0(pIemCpu);
2144 }
2145
2146 /* Fetch the descriptor for the new CS. */
2147 IEMSELDESC DescCS;
2148 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2149 if (rcStrict != VINF_SUCCESS)
2150 {
2151 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2152 return rcStrict;
2153 }
2154
2155 /* Must be a code segment. */
2156 if (!DescCS.Legacy.Gen.u1DescType)
2157 {
2158 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2159 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2160 }
2161 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2162 {
2163 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2164 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2165 }
2166
2167 /* Don't allow lowering the privilege level. */
2168 /** @todo Does the lowering of privileges apply to software interrupts
2169 * only? This has bearings on the more-privileged or
2170 * same-privilege stack behavior further down. A testcase would
2171 * be nice. */
2172 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2173 {
2174 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2175 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2176 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2177 }
2178 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
2179
2180 /* Check the new EIP against the new CS limit. */
2181 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2182 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2183 ? Idte.Gate.u16OffsetLow
2184 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2185 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2186 if (uNewEip > cbLimitCS)
2187 {
2188 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2189 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2190 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2191 }
2192
2193 /* Make sure the selector is present. */
2194 if (!DescCS.Legacy.Gen.u1Present)
2195 {
2196 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2197 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2198 }
2199
2200 /*
2201 * If the privilege level changes, we need to get a new stack from the TSS.
2202 * This in turns means validating the new SS and ESP...
2203 */
2204 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2205 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2206 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2207 if (uNewCpl != pIemCpu->uCpl)
2208 {
2209 RTSEL NewSS;
2210 uint32_t uNewEsp;
2211 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2212 if (rcStrict != VINF_SUCCESS)
2213 return rcStrict;
2214
2215 IEMSELDESC DescSS;
2216 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2217 if (rcStrict != VINF_SUCCESS)
2218 return rcStrict;
2219
2220 /* Check that there is sufficient space for the stack frame. */
2221 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2222 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2223 {
2224 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2225 }
2226
2227 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2228 if ( uNewEsp - 1 > cbLimitSS
2229 || uNewEsp < cbStackFrame)
2230 {
2231 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2232 u8Vector, NewSS, uNewEsp, cbStackFrame));
2233 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2234 }
2235
2236 /*
2237 * Start making changes.
2238 */
2239
2240 /* Create the stack frame. */
2241 RTPTRUNION uStackFrame;
2242 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2243 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2244 if (rcStrict != VINF_SUCCESS)
2245 return rcStrict;
2246 void * const pvStackFrame = uStackFrame.pv;
2247
2248 if (fFlags & IEM_XCPT_FLAGS_ERR)
2249 *uStackFrame.pu32++ = uErr;
2250 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2251 ? pCtx->eip + cbInstr : pCtx->eip;
2252 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2253 uStackFrame.pu32[2] = fEfl;
2254 uStackFrame.pu32[3] = pCtx->esp;
2255 uStackFrame.pu32[4] = pCtx->ss.Sel;
2256 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2257 if (rcStrict != VINF_SUCCESS)
2258 return rcStrict;
2259
2260 /* Mark the selectors 'accessed' (hope this is the correct time). */
2261 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2262 * after pushing the stack frame? (Write protect the gdt + stack to
2263 * find out.) */
2264 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2265 {
2266 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2267 if (rcStrict != VINF_SUCCESS)
2268 return rcStrict;
2269 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2270 }
2271
2272 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2273 {
2274 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2275 if (rcStrict != VINF_SUCCESS)
2276 return rcStrict;
2277 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2278 }
2279
2280 /*
2281 * Start committing the register changes (joins with the DPL=CPL branch).
2282 */
2283 pCtx->ss.Sel = NewSS;
2284 pCtx->ss.ValidSel = NewSS;
2285 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2286 pCtx->ss.u32Limit = cbLimitSS;
2287 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2288 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2289 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2290 pIemCpu->uCpl = uNewCpl;
2291 }
2292 /*
2293 * Same privilege, no stack change and smaller stack frame.
2294 */
2295 else
2296 {
2297 uint64_t uNewRsp;
2298 RTPTRUNION uStackFrame;
2299 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2300 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2301 if (rcStrict != VINF_SUCCESS)
2302 return rcStrict;
2303 void * const pvStackFrame = uStackFrame.pv;
2304
2305 if (fFlags & IEM_XCPT_FLAGS_ERR)
2306 *uStackFrame.pu32++ = uErr;
2307 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2308 ? pCtx->eip + cbInstr : pCtx->eip;
2309 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2310 uStackFrame.pu32[2] = fEfl;
2311 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2312 if (rcStrict != VINF_SUCCESS)
2313 return rcStrict;
2314
2315 /* Mark the CS selector as 'accessed'. */
2316 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2317 {
2318 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2319 if (rcStrict != VINF_SUCCESS)
2320 return rcStrict;
2321 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2322 }
2323
2324 /*
2325 * Start committing the register changes (joins with the other branch).
2326 */
2327 pCtx->rsp = uNewRsp;
2328 }
2329
2330 /* ... register committing continues. */
2331 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2332 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2333 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2334 pCtx->cs.u32Limit = cbLimitCS;
2335 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2336 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2337
2338 pCtx->rip = uNewEip;
2339 fEfl &= ~fEflToClear;
2340 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2341
2342 if (fFlags & IEM_XCPT_FLAGS_CR2)
2343 pCtx->cr2 = uCr2;
2344
2345 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2346 iemRaiseXcptAdjustState(pCtx, u8Vector);
2347
2348 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2349}
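
/*
 * Stack frame sketch for the protected-mode dispatcher above (32-bit frames,
 * listed from low to high address, i.e. last pushed first):
 *     [error code]  return EIP  old CS  EFLAGS              - same privilege
 *     [error code]  return EIP  old CS  EFLAGS  ESP  SS     - privilege change
 * The old CS is pushed with its RPL replaced by the old CPL, and on a
 * privilege change the new SS:ESP is fetched from the TSS before the frame is
 * written to the new stack.
 */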
2350
2351
2352/**
2353 * Implements exceptions and interrupts for V8086 mode.
2354 *
2355 * @returns VBox strict status code.
2356 * @param pIemCpu The IEM per CPU instance data.
2357 * @param pCtx The CPU context.
2358 * @param cbInstr The number of bytes to offset rIP by in the return
2359 * address.
2360 * @param u8Vector The interrupt / exception vector number.
2361 * @param fFlags The flags.
2362 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2363 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2364 */
2365static VBOXSTRICTRC
2366iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2367 PCPUMCTX pCtx,
2368 uint8_t cbInstr,
2369 uint8_t u8Vector,
2370 uint32_t fFlags,
2371 uint16_t uErr,
2372 uint64_t uCr2)
2373{
2374 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2375 /** @todo implement me. */
2376 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2377}
2378
2379
2380/**
2381 * Implements exceptions and interrupts for long mode.
2382 *
2383 * @returns VBox strict status code.
2384 * @param pIemCpu The IEM per CPU instance data.
2385 * @param pCtx The CPU context.
2386 * @param cbInstr The number of bytes to offset rIP by in the return
2387 * address.
2388 * @param u8Vector The interrupt / exception vector number.
2389 * @param fFlags The flags.
2390 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2391 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2392 */
2393static VBOXSTRICTRC
2394iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2395 PCPUMCTX pCtx,
2396 uint8_t cbInstr,
2397 uint8_t u8Vector,
2398 uint32_t fFlags,
2399 uint16_t uErr,
2400 uint64_t uCr2)
2401{
2402 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2403 /** @todo implement me. */
2404 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
2405}
2406
2407
2408/**
2409 * Implements exceptions and interrupts.
2410 *
2411 * All exceptions and interrupts go through this function!
2412 *
2413 * @returns VBox strict status code.
2414 * @param pIemCpu The IEM per CPU instance data.
2415 * @param cbInstr The number of bytes to offset rIP by in the return
2416 * address.
2417 * @param u8Vector The interrupt / exception vector number.
2418 * @param fFlags The flags.
2419 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2420 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2421 */
2422DECL_NO_INLINE(static, VBOXSTRICTRC)
2423iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2424 uint8_t cbInstr,
2425 uint8_t u8Vector,
2426 uint32_t fFlags,
2427 uint16_t uErr,
2428 uint64_t uCr2)
2429{
2430 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2431
2432 /*
2433 * Do recursion accounting.
2434 */
2435 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2436 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2437 if (pIemCpu->cXcptRecursions == 0)
2438 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2439 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2440 else
2441 {
2442 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2443 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2444
2445 /** @todo double and triple faults. */
2446 if (pIemCpu->cXcptRecursions >= 3)
2447 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2448
2449 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2450 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2451 {
2452 ....
2453 } */
2454 }
2455 pIemCpu->cXcptRecursions++;
2456 pIemCpu->uCurXcpt = u8Vector;
2457 pIemCpu->fCurXcpt = fFlags;
2458
2459 /*
2460 * Extensive logging.
2461 */
2462#if defined(LOG_ENABLED) && defined(IN_RING3)
2463 if (LogIs3Enabled())
2464 {
2465 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2466 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2467 char szRegs[4096];
2468 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2469 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2470 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2471 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2472 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2473 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2474 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2475 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2476 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2477 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2478 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2479 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2480 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2481 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2482 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2483 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2484 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2485 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2486 " efer=%016VR{efer}\n"
2487 " pat=%016VR{pat}\n"
2488 " sf_mask=%016VR{sf_mask}\n"
2489 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2490 " lstar=%016VR{lstar}\n"
2491 " star=%016VR{star} cstar=%016VR{cstar}\n"
2492 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2493 );
2494
2495 char szInstr[256];
2496 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2497 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2498 szInstr, sizeof(szInstr), NULL);
2499 Log3(("%s%s\n", szRegs, szInstr));
2500 }
2501#endif /* LOG_ENABLED */
2502
2503 /*
2504 * Call the mode specific worker function.
2505 */
2506 VBOXSTRICTRC rcStrict;
2507 if (!(pCtx->cr0 & X86_CR0_PE))
2508 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2509 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2510 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2511 else if (!pCtx->eflags.Bits.u1VM)
2512 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2513 else
2514 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2515
2516 /*
2517 * Unwind.
2518 */
2519 pIemCpu->cXcptRecursions--;
2520 pIemCpu->uCurXcpt = uPrevXcpt;
2521 pIemCpu->fCurXcpt = fPrevXcpt;
2522 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2523 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2524 return rcStrict;
2525}
2526
2527
2528/** \#DE - 00. */
2529DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2530{
2531 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2532}
2533
2534
2535/** \#DB - 01. */
2536DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2537{
2538 /** @todo set/clear RF. */
2539 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2540}
2541
2542
2543/** \#UD - 06. */
2544DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2545{
2546 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2547}
2548
2549
2550/** \#NM - 07. */
2551DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2552{
2553 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2554}
2555
2556
2557#ifdef SOME_UNUSED_FUNCTION
2558/** \#TS(err) - 0a. */
2559DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2560{
2561 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2562}
2563#endif
2564
2565
2566/** \#TS(tr) - 0a. */
2567DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2568{
2569 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2570 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2571}
2572
2573
2574/** \#NP(err) - 0b. */
2575DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2576{
2577 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2578}
2579
2580
2581/** \#NP(seg) - 0b. */
2582DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2583{
2584 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2585 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2586}
2587
2588
2589/** \#NP(sel) - 0b. */
2590DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2591{
2592 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2593 uSel & ~X86_SEL_RPL, 0);
2594}
2595
2596
2597/** \#SS(seg) - 0c. */
2598DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2599{
2600 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2601 uSel & ~X86_SEL_RPL, 0);
2602}
2603
2604
2605/** \#GP(n) - 0d. */
2606DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2607{
2608 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2609}
2610
2611
2612/** \#GP(0) - 0d. */
2613DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2614{
2615 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2616}
2617
2618
2619/** \#GP(sel) - 0d. */
2620DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2621{
2622 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2623 Sel & ~X86_SEL_RPL, 0);
2624}
2625
2626
2627/** \#GP(0) - 0d. */
2628DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2629{
2630 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2631}
2632
2633
2634/** \#GP(sel) - 0d. */
2635DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2636{
2637 NOREF(iSegReg); NOREF(fAccess);
2638 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2639 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2640}
2641
2642
2643/** \#GP(sel) - 0d. */
2644DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2645{
2646 NOREF(Sel);
2647 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2648}
2649
2650
2651/** \#GP(sel) - 0d. */
2652DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2653{
2654 NOREF(iSegReg); NOREF(fAccess);
2655 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2656}
2657
2658
2659/** \#PF(n) - 0e. */
2660DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2661{
2662 uint16_t uErr;
2663 switch (rc)
2664 {
2665 case VERR_PAGE_NOT_PRESENT:
2666 case VERR_PAGE_TABLE_NOT_PRESENT:
2667 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2668 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2669 uErr = 0;
2670 break;
2671
2672 default:
2673 AssertMsgFailed(("%Rrc\n", rc));
2674 case VERR_ACCESS_DENIED:
2675 uErr = X86_TRAP_PF_P;
2676 break;
2677
2678 /** @todo reserved */
2679 }
2680
2681 if (pIemCpu->uCpl == 3)
2682 uErr |= X86_TRAP_PF_US;
2683
2684 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2685 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2686 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2687 uErr |= X86_TRAP_PF_ID;
2688
2689 /* Note! RW access callers reporting a WRITE protection fault will clear
2690 the READ flag before calling. So, read-modify-write accesses (RW)
2691 can safely be reported as READ faults. */
2692 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2693 uErr |= X86_TRAP_PF_RW;
2694
2695 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2696 uErr, GCPtrWhere);
2697}
2698
2699
2700/** \#MF(0) - 10. */
2701DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2702{
2703 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2704}
2705
2706
2707/** \#AC(0) - 11. */
2708DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2709{
2710 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2711}
2712
2713
2714/**
2715 * Macro for calling iemCImplRaiseDivideError().
2716 *
2717 * This enables us to add/remove arguments and force different levels of
2718 * inlining as we wish.
2719 *
2720 * @return Strict VBox status code.
2721 */
2722#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2723IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2724{
2725 NOREF(cbInstr);
2726 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2727}
2728
2729
2730/**
2731 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2732 *
2733 * This enables us to add/remove arguments and force different levels of
2734 * inlining as we wish.
2735 *
2736 * @return Strict VBox status code.
2737 */
2738#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2739IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2740{
2741 NOREF(cbInstr);
2742 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2743}
2744
2745
2746/**
2747 * Macro for calling iemCImplRaiseInvalidOpcode().
2748 *
2749 * This enables us to add/remove arguments and force different levels of
2750 * inlining as we wish.
2751 *
2752 * @return Strict VBox status code.
2753 */
2754#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2755IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2756{
2757 NOREF(cbInstr);
2758 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2759}
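
/*
 * Usage sketch (illustrative, not taken from this file): opcode decoders use
 * these IEMOP_RAISE_* macros to defer the actual exception raising to a C
 * implementation via IEM_MC_DEFER_TO_CIMPL_0, for instance when an encoding
 * turns out to be invalid:
 *
 *     if (fInvalidEncoding)   // hypothetical condition
 *         return IEMOP_RAISE_INVALID_OPCODE();
 */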
2760
2761
2762/** @} */
2763
2764
2765/*
2766 *
2767 * Helper routines.
2768 * Helper routines.
2769 * Helper routines.
2770 *
2771 */
2772
2773/**
2774 * Recalculates the effective operand size.
2775 *
2776 * @param pIemCpu The IEM state.
2777 */
2778static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2779{
2780 switch (pIemCpu->enmCpuMode)
2781 {
2782 case IEMMODE_16BIT:
2783 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2784 break;
2785 case IEMMODE_32BIT:
2786 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2787 break;
2788 case IEMMODE_64BIT:
2789 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2790 {
2791 case 0:
2792 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2793 break;
2794 case IEM_OP_PRF_SIZE_OP:
2795 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2796 break;
2797 case IEM_OP_PRF_SIZE_REX_W:
2798 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2799 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2800 break;
2801 }
2802 break;
2803 default:
2804 AssertFailed();
2805 }
2806}
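
/*
 * Result sketch for the 64-bit mode leg above (derived from the switch):
 *     no size prefixes             -> the default operand size (16 or 32 bit)
 *     0x66 only                    -> 16-bit
 *     REX.W (with or without 0x66) -> 64-bit, i.e. REX.W wins over 0x66
 * In 16-bit and 32-bit mode the 0x66 prefix simply toggles between 16 and
 * 32 bit.
 */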
2807
2808
2809/**
2810 * Sets the default operand size to 64-bit and recalculates the effective
2811 * operand size.
2812 *
2813 * @param pIemCpu The IEM state.
2814 */
2815static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2816{
2817 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2818 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2819 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2820 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2821 else
2822 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2823}
2824
2825
2826/*
2827 *
2828 * Common opcode decoders.
2829 * Common opcode decoders.
2830 * Common opcode decoders.
2831 *
2832 */
2833//#include <iprt/mem.h>
2834
2835/**
2836 * Used to add extra details about a stub case.
2837 * @param pIemCpu The IEM per CPU state.
2838 */
2839static void iemOpStubMsg2(PIEMCPU pIemCpu)
2840{
2841#if defined(LOG_ENABLED) && defined(IN_RING3)
2842 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2843 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2844 char szRegs[4096];
2845 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2846 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2847 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2848 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2849 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2850 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2851 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2852 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2853 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2854 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2855 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2856 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2857 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2858 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2859 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2860 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2861 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2862 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2863 " efer=%016VR{efer}\n"
2864 " pat=%016VR{pat}\n"
2865 " sf_mask=%016VR{sf_mask}\n"
2866 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2867 " lstar=%016VR{lstar}\n"
2868 " star=%016VR{star} cstar=%016VR{cstar}\n"
2869 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2870 );
2871
2872 char szInstr[256];
2873 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2874 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2875 szInstr, sizeof(szInstr), NULL);
2876
2877 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2878#else
2879 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
2880#endif
2881}
2882
2883/**
2884 * Complains about a stub.
2885 *
2886 * Two versions of this macro are provided: one for daily use and one for use
2887 * when working on IEM.
2888 */
2889#if 0
2890# define IEMOP_BITCH_ABOUT_STUB() \
2891 do { \
2892 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2893 iemOpStubMsg2(pIemCpu); \
2894 RTAssertPanic(); \
2895 } while (0)
2896#else
2897# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
2898#endif
2899
2900/** Stubs an opcode. */
2901#define FNIEMOP_STUB(a_Name) \
2902 FNIEMOP_DEF(a_Name) \
2903 { \
2904 IEMOP_BITCH_ABOUT_STUB(); \
2905 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2906 } \
2907 typedef int ignore_semicolon
2908
2909/** Stubs an opcode. */
2910#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2911 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2912 { \
2913 IEMOP_BITCH_ABOUT_STUB(); \
2914 NOREF(a_Name0); \
2915 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2916 } \
2917 typedef int ignore_semicolon
2918
2919/** Stubs an opcode which currently should raise \#UD. */
2920#define FNIEMOP_UD_STUB(a_Name) \
2921 FNIEMOP_DEF(a_Name) \
2922 { \
2923 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2924 return IEMOP_RAISE_INVALID_OPCODE(); \
2925 } \
2926 typedef int ignore_semicolon
2927
2928/** Stubs an opcode which currently should raise \#UD. */
2929#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2930 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2931 { \
2932 NOREF(a_Name0); \
2933 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2934 return IEMOP_RAISE_INVALID_OPCODE(); \
2935 } \
2936 typedef int ignore_semicolon
2937
2938
2939
2940/** @name Register Access.
2941 * @{
2942 */
2943
2944/**
2945 * Gets a reference (pointer) to the specified hidden segment register.
2946 *
2947 * @returns Hidden register reference.
2948 * @param pIemCpu The per CPU data.
2949 * @param iSegReg The segment register.
2950 */
2951static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2952{
2953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2954 PCPUMSELREG pSReg;
2955 switch (iSegReg)
2956 {
2957 case X86_SREG_ES: pSReg = &pCtx->es; break;
2958 case X86_SREG_CS: pSReg = &pCtx->cs; break;
2959 case X86_SREG_SS: pSReg = &pCtx->ss; break;
2960 case X86_SREG_DS: pSReg = &pCtx->ds; break;
2961 case X86_SREG_FS: pSReg = &pCtx->fs; break;
2962 case X86_SREG_GS: pSReg = &pCtx->gs; break;
2963 default:
2964 AssertFailedReturn(NULL);
2965 }
2966#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2967 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
2968 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
2969#else
2970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2971#endif
2972 return pSReg;
2973}
2974
2975
2976/**
2977 * Gets a reference (pointer) to the specified segment register (the selector
2978 * value).
2979 *
2980 * @returns Pointer to the selector variable.
2981 * @param pIemCpu The per CPU data.
2982 * @param iSegReg The segment register.
2983 */
2984static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2985{
2986 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2987 switch (iSegReg)
2988 {
2989 case X86_SREG_ES: return &pCtx->es.Sel;
2990 case X86_SREG_CS: return &pCtx->cs.Sel;
2991 case X86_SREG_SS: return &pCtx->ss.Sel;
2992 case X86_SREG_DS: return &pCtx->ds.Sel;
2993 case X86_SREG_FS: return &pCtx->fs.Sel;
2994 case X86_SREG_GS: return &pCtx->gs.Sel;
2995 }
2996 AssertFailedReturn(NULL);
2997}
2998
2999
3000/**
3001 * Fetches the selector value of a segment register.
3002 *
3003 * @returns The selector value.
3004 * @param pIemCpu The per CPU data.
3005 * @param iSegReg The segment register.
3006 */
3007static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3008{
3009 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3010 switch (iSegReg)
3011 {
3012 case X86_SREG_ES: return pCtx->es.Sel;
3013 case X86_SREG_CS: return pCtx->cs.Sel;
3014 case X86_SREG_SS: return pCtx->ss.Sel;
3015 case X86_SREG_DS: return pCtx->ds.Sel;
3016 case X86_SREG_FS: return pCtx->fs.Sel;
3017 case X86_SREG_GS: return pCtx->gs.Sel;
3018 }
3019 AssertFailedReturn(0xffff);
3020}
3021
3022
3023/**
3024 * Gets a reference (pointer) to the specified general register.
3025 *
3026 * @returns Register reference.
3027 * @param pIemCpu The per CPU data.
3028 * @param iReg The general register.
3029 */
3030static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3031{
3032 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3033 switch (iReg)
3034 {
3035 case X86_GREG_xAX: return &pCtx->rax;
3036 case X86_GREG_xCX: return &pCtx->rcx;
3037 case X86_GREG_xDX: return &pCtx->rdx;
3038 case X86_GREG_xBX: return &pCtx->rbx;
3039 case X86_GREG_xSP: return &pCtx->rsp;
3040 case X86_GREG_xBP: return &pCtx->rbp;
3041 case X86_GREG_xSI: return &pCtx->rsi;
3042 case X86_GREG_xDI: return &pCtx->rdi;
3043 case X86_GREG_x8: return &pCtx->r8;
3044 case X86_GREG_x9: return &pCtx->r9;
3045 case X86_GREG_x10: return &pCtx->r10;
3046 case X86_GREG_x11: return &pCtx->r11;
3047 case X86_GREG_x12: return &pCtx->r12;
3048 case X86_GREG_x13: return &pCtx->r13;
3049 case X86_GREG_x14: return &pCtx->r14;
3050 case X86_GREG_x15: return &pCtx->r15;
3051 }
3052 AssertFailedReturn(NULL);
3053}
3054
3055
3056/**
3057 * Gets a reference (pointer) to the specified 8-bit general register.
3058 *
3059 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3060 *
3061 * @returns Register reference.
3062 * @param pIemCpu The per CPU data.
3063 * @param iReg The register.
3064 */
3065static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3066{
3067 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3068 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3069
3070 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3071 if (iReg >= 4)
3072 pu8Reg++;
3073 return pu8Reg;
3074}
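
/*
 * Mapping sketch for the above (standard x86 register encoding): without any
 * REX prefix, iReg 0-3 are AL/CL/DL/BL and iReg 4-7 are AH/CH/DH/BH, i.e. the
 * second byte of RAX/RCX/RDX/RBX - hence the pointer increment, which assumes
 * a little-endian host. With a REX prefix, iReg 4-7 become SPL/BPL/SIL/DIL and
 * 8-15 map to R8B-R15B, so iemGRegRef can be used directly.
 */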
3075
3076
3077/**
3078 * Fetches the value of an 8-bit general register.
3079 *
3080 * @returns The register value.
3081 * @param pIemCpu The per CPU data.
3082 * @param iReg The register.
3083 */
3084static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3085{
3086 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3087 return *pbSrc;
3088}
3089
3090
3091/**
3092 * Fetches the value of a 16-bit general register.
3093 *
3094 * @returns The register value.
3095 * @param pIemCpu The per CPU data.
3096 * @param iReg The register.
3097 */
3098static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3099{
3100 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3101}
3102
3103
3104/**
3105 * Fetches the value of a 32-bit general register.
3106 *
3107 * @returns The register value.
3108 * @param pIemCpu The per CPU data.
3109 * @param iReg The register.
3110 */
3111static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3112{
3113 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3114}
3115
3116
3117/**
3118 * Fetches the value of a 64-bit general register.
3119 *
3120 * @returns The register value.
3121 * @param pIemCpu The per CPU data.
3122 * @param iReg The register.
3123 */
3124static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3125{
3126 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3127}
3128
3129
3130/**
3131 * Checks whether the FPU state is in FXSAVE format.
3132 *
3133 * @returns true if it is, false if it's in FNSAVE format.
3134 * @param pIemCpu The IEM per CPU data.
3135 */
3136DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3137{
3138#ifdef RT_ARCH_AMD64
3139 NOREF(pIemCpu);
3140 return true;
3141#else
3142 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3143 return true;
3144#endif
3145}
3146
3147
3148/**
3149 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3150 *
3151 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3152 * segment limit.
3153 *
3154 * @param pIemCpu The per CPU data.
3155 * @param offNextInstr The offset of the next instruction.
3156 */
3157static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3158{
3159 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3160 switch (pIemCpu->enmEffOpSize)
3161 {
3162 case IEMMODE_16BIT:
3163 {
3164 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3165 if ( uNewIp > pCtx->cs.u32Limit
3166 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3167 return iemRaiseGeneralProtectionFault0(pIemCpu);
3168 pCtx->rip = uNewIp;
3169 break;
3170 }
3171
3172 case IEMMODE_32BIT:
3173 {
3174 Assert(pCtx->rip <= UINT32_MAX);
3175 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3176
3177 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3178 if (uNewEip > pCtx->cs.u32Limit)
3179 return iemRaiseGeneralProtectionFault0(pIemCpu);
3180 pCtx->rip = uNewEip;
3181 break;
3182 }
3183
3184 case IEMMODE_64BIT:
3185 {
3186 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3187
3188 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3189 if (!IEM_IS_CANONICAL(uNewRip))
3190 return iemRaiseGeneralProtectionFault0(pIemCpu);
3191 pCtx->rip = uNewRip;
3192 break;
3193 }
3194
3195 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3196 }
3197
3198 return VINF_SUCCESS;
3199}
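
/*
 * Worked example (hypothetical values): a two byte "jmp short" at IP=0x1000
 * with offNextInstr = 0x10 ends up at 0x1000 + 2 + 0x10 = 0x1012, since
 * pIemCpu->offOpcode holds the length of the instruction decoded so far and
 * the relative offset is applied to the address of the next instruction.
 */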
3200
3201
3202/**
3203 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3204 *
3205 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3206 * segment limit.
3207 *
3208 * @returns Strict VBox status code.
3209 * @param pIemCpu The per CPU data.
3210 * @param offNextInstr The offset of the next instruction.
3211 */
3212static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3213{
3214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3215 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3216
3217 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3218 if ( uNewIp > pCtx->cs.u32Limit
3219 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3220 return iemRaiseGeneralProtectionFault0(pIemCpu);
3221 /** @todo Test 16-bit jump in 64-bit mode. */
3222 pCtx->rip = uNewIp;
3223
3224 return VINF_SUCCESS;
3225}
3226
3227
3228/**
3229 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3230 *
3231 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3232 * segment limit.
3233 *
3234 * @returns Strict VBox status code.
3235 * @param pIemCpu The per CPU data.
3236 * @param offNextInstr The offset of the next instruction.
3237 */
3238static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3239{
3240 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3241 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3242
3243 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3244 {
3245 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3246
3247 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3248 if (uNewEip > pCtx->cs.u32Limit)
3249 return iemRaiseGeneralProtectionFault0(pIemCpu);
3250 pCtx->rip = uNewEip;
3251 }
3252 else
3253 {
3254 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3255
3256 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3257 if (!IEM_IS_CANONICAL(uNewRip))
3258 return iemRaiseGeneralProtectionFault0(pIemCpu);
3259 pCtx->rip = uNewRip;
3260 }
3261 return VINF_SUCCESS;
3262}
3263
3264
3265/**
3266 * Performs a near jump to the specified address.
3267 *
3268 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3269 * segment limit.
3270 *
3271 * @param pIemCpu The per CPU data.
3272 * @param uNewRip The new RIP value.
3273 */
3274static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3275{
3276 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3277 switch (pIemCpu->enmEffOpSize)
3278 {
3279 case IEMMODE_16BIT:
3280 {
3281 Assert(uNewRip <= UINT16_MAX);
3282 if ( uNewRip > pCtx->cs.u32Limit
3283 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3284 return iemRaiseGeneralProtectionFault0(pIemCpu);
3285 /** @todo Test 16-bit jump in 64-bit mode. */
3286 pCtx->rip = uNewRip;
3287 break;
3288 }
3289
3290 case IEMMODE_32BIT:
3291 {
3292 Assert(uNewRip <= UINT32_MAX);
3293 Assert(pCtx->rip <= UINT32_MAX);
3294 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3295
3296 if (uNewRip > pCtx->cs.u32Limit)
3297 return iemRaiseGeneralProtectionFault0(pIemCpu);
3298 pCtx->rip = uNewRip;
3299 break;
3300 }
3301
3302 case IEMMODE_64BIT:
3303 {
3304 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3305
3306 if (!IEM_IS_CANONICAL(uNewRip))
3307 return iemRaiseGeneralProtectionFault0(pIemCpu);
3308 pCtx->rip = uNewRip;
3309 break;
3310 }
3311
3312 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3313 }
3314
3315 return VINF_SUCCESS;
3316}
3317
3318
3319/**
3320 * Gets the address of the top of the stack.
3321 *
3322 * @param pCtx The CPU context which SP/ESP/RSP should be
3323 * read.
3324 */
3325DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3326{
3327 if (pCtx->ss.Attr.n.u1Long)
3328 return pCtx->rsp;
3329 if (pCtx->ss.Attr.n.u1DefBig)
3330 return pCtx->esp;
3331 return pCtx->sp;
3332}
3333
3334
3335/**
3336 * Updates the RIP/EIP/IP to point to the next instruction.
3337 *
3338 * @param pIemCpu The per CPU data.
3339 * @param cbInstr The number of bytes to add.
3340 */
3341static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3342{
3343 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3344 switch (pIemCpu->enmCpuMode)
3345 {
3346 case IEMMODE_16BIT:
3347 Assert(pCtx->rip <= UINT16_MAX);
3348 pCtx->eip += cbInstr;
3349 pCtx->eip &= UINT32_C(0xffff);
3350 break;
3351
3352 case IEMMODE_32BIT:
3353 pCtx->eip += cbInstr;
3354 Assert(pCtx->rip <= UINT32_MAX);
3355 break;
3356
3357 case IEMMODE_64BIT:
3358 pCtx->rip += cbInstr;
3359 break;
3360 default: AssertFailed();
3361 }
3362}
3363
3364
3365/**
3366 * Updates the RIP/EIP/IP to point to the next instruction.
3367 *
3368 * @param pIemCpu The per CPU data.
3369 */
3370static void iemRegUpdateRip(PIEMCPU pIemCpu)
3371{
3372 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3373}
3374
3375
3376/**
3377 * Adds to the stack pointer.
3378 *
3379 * @param pCtx The CPU context which SP/ESP/RSP should be
3380 * updated.
3381 * @param cbToAdd The number of bytes to add.
3382 */
3383DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3384{
3385 if (pCtx->ss.Attr.n.u1Long)
3386 pCtx->rsp += cbToAdd;
3387 else if (pCtx->ss.Attr.n.u1DefBig)
3388 pCtx->esp += cbToAdd;
3389 else
3390 pCtx->sp += cbToAdd;
3391}
3392
3393
3394/**
3395 * Subtracts from the stack pointer.
3396 *
3397 * @param pCtx The CPU context which SP/ESP/RSP should be
3398 * updated.
3399 * @param cbToSub The number of bytes to subtract.
3400 */
3401DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3402{
3403 if (pCtx->ss.Attr.n.u1Long)
3404 pCtx->rsp -= cbToSub;
3405 else if (pCtx->ss.Attr.n.u1DefBig)
3406 pCtx->esp -= cbToSub;
3407 else
3408 pCtx->sp -= cbToSub;
3409}
3410
3411
3412/**
3413 * Adds to the temporary stack pointer.
3414 *
3415 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3416 * @param cbToAdd The number of bytes to add.
3417 * @param pCtx Where to get the current stack mode.
3418 */
3419DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx)
3420{
3421 if (pCtx->ss.Attr.n.u1Long)
3422 pTmpRsp->u += cbToAdd;
3423 else if (pCtx->ss.Attr.n.u1DefBig)
3424 pTmpRsp->DWords.dw0 += cbToAdd;
3425 else
3426 pTmpRsp->Words.w0 += cbToAdd;
3427}
3428
3429
3430/**
3431 * Subtracts from the temporary stack pointer.
3432 *
3433 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3434 * @param cbToSub The number of bytes to subtract.
3435 * @param pCtx Where to get the current stack mode.
3436 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3437 * expecting that.
3438 */
3439DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx)
3440{
3441 if (pCtx->ss.Attr.n.u1Long)
3442 pTmpRsp->u -= cbToSub;
3443 else if (pCtx->ss.Attr.n.u1DefBig)
3444 pTmpRsp->DWords.dw0 -= cbToSub;
3445 else
3446 pTmpRsp->Words.w0 -= cbToSub;
3447}
3448
3449
3450/**
3451 * Calculates the effective stack address for a push of the specified size as
3452 * well as the new RSP value (upper bits may be masked).
3453 *
3454 * @returns Effective stack address for the push.
3455 * @param pCtx Where to get the current stack mode.
3456 * @param cbItem The size of the stack item to push.
3457 * @param puNewRsp Where to return the new RSP value.
3458 */
3459DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3460{
3461 RTUINT64U uTmpRsp;
3462 RTGCPTR GCPtrTop;
3463 uTmpRsp.u = pCtx->rsp;
3464
3465 if (pCtx->ss.Attr.n.u1Long)
3466 GCPtrTop = uTmpRsp.u -= cbItem;
3467 else if (pCtx->ss.Attr.n.u1DefBig)
3468 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3469 else
3470 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3471 *puNewRsp = uTmpRsp.u;
3472 return GCPtrTop;
3473}
3474
3475
3476/**
3477 * Gets the current stack pointer and calculates the value after a pop of the
3478 * specified size.
3479 *
3480 * @returns Current stack pointer.
3481 * @param pCtx Where to get the current stack mode.
3482 * @param cbItem The size of the stack item to pop.
3483 * @param puNewRsp Where to return the new RSP value.
3484 */
3485DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3486{
3487 RTUINT64U uTmpRsp;
3488 RTGCPTR GCPtrTop;
3489 uTmpRsp.u = pCtx->rsp;
3490
3491 if (pCtx->ss.Attr.n.u1Long)
3492 {
3493 GCPtrTop = uTmpRsp.u;
3494 uTmpRsp.u += cbItem;
3495 }
3496 else if (pCtx->ss.Attr.n.u1DefBig)
3497 {
3498 GCPtrTop = uTmpRsp.DWords.dw0;
3499 uTmpRsp.DWords.dw0 += cbItem;
3500 }
3501 else
3502 {
3503 GCPtrTop = uTmpRsp.Words.w0;
3504 uTmpRsp.Words.w0 += cbItem;
3505 }
3506 *puNewRsp = uTmpRsp.u;
3507 return GCPtrTop;
3508}
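/*
 * Example (illustration only): on a 32-bit stack (SS.Attr.n.u1DefBig=1) with
 * RSP=0x2000, iemRegGetRspForPush(pCtx, 4, &uNewRsp) returns 0x1ffc and sets
 * uNewRsp to 0x1ffc, while iemRegGetRspForPop(pCtx, 4, &uNewRsp) returns
 * 0x2000 and sets uNewRsp to 0x2004.  Only the low 32 bits are touched; on a
 * 16-bit stack only SP (the low word) would move.
 */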
3509
3510
3511/**
3512 * Calculates the effective stack address for a push of the specified size as
3513 * well as the new temporary RSP value (upper bits may be masked).
3514 *
3515 * @returns Effective stack address for the push.
3516 * @param pTmpRsp The temporary stack pointer. This is updated.
3517 * @param cbItem The size of the stack item to push.
3518 * @param pCtx Where to get the current stack mode.
3519 */
3520DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3521{
3522 RTGCPTR GCPtrTop;
3523
3524 if (pCtx->ss.Attr.n.u1Long)
3525 GCPtrTop = pTmpRsp->u -= cbItem;
3526 else if (pCtx->ss.Attr.n.u1DefBig)
3527 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3528 else
3529 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3530 return GCPtrTop;
3531}
3532
3533
3534/**
3535 * Gets the effective stack address for a pop of the specified size and
3536 * calculates and updates the temporary RSP.
3537 *
3538 * @returns Current stack pointer.
3539 * @param pTmpRsp The temporary stack pointer. This is updated.
3540 * @param pCtx Where to get the current stack mode.
3541 * @param cbItem The size of the stack item to pop.
3542 */
3543DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3544{
3545 RTGCPTR GCPtrTop;
3546 if (pCtx->ss.Attr.n.u1Long)
3547 {
3548 GCPtrTop = pTmpRsp->u;
3549 pTmpRsp->u += cbItem;
3550 }
3551 else if (pCtx->ss.Attr.n.u1DefBig)
3552 {
3553 GCPtrTop = pTmpRsp->DWords.dw0;
3554 pTmpRsp->DWords.dw0 += cbItem;
3555 }
3556 else
3557 {
3558 GCPtrTop = pTmpRsp->Words.w0;
3559 pTmpRsp->Words.w0 += cbItem;
3560 }
3561 return GCPtrTop;
3562}
3563
3564
3565/**
3566 * Checks if an Intel CPUID feature bit is set.
3567 *
3568 * @returns true / false.
3569 *
3570 * @param pIemCpu The IEM per CPU data.
3571 * @param fEdx The EDX bit to test, or 0 if ECX.
3572 * @param fEcx The ECX bit to test, or 0 if EDX.
3573 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3574 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3575 */
3576static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3577{
3578 uint32_t uEax, uEbx, uEcx, uEdx;
3579 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3580 return (fEcx && (uEcx & fEcx))
3581 || (fEdx && (uEdx & fEdx));
3582}
3583
3584
3585/**
3586 * Checks if an AMD CPUID feature bit is set.
3587 *
3588 * @returns true / false.
3589 *
3590 * @param pIemCpu The IEM per CPU data.
3591 * @param fEdx The EDX bit to test, or 0 if ECX.
3592 * @param fEcx The ECX bit to test, or 0 if EDX.
3593 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3594 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3595 */
3596static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3597{
3598 uint32_t uEax, uEbx, uEcx, uEdx;
3599 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3600 return (fEcx && (uEcx & fEcx))
3601 || (fEdx && (uEdx & fEdx));
3602}
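/*
 * Illustrative sketch (not part of the original file): how the helpers above
 * are typically reached through the IEM_IS_*_CPUID_FEATURE_PRESENT_* macros.
 * The feature bit define is assumed to be the one from iprt/x86.h; the
 * wrapper function below is hypothetical and only shows the calling
 * convention.
 */
#if 0 /* example only */
static bool iemExampleGuestHasSse2(PIEMCPU pIemCpu)
{
    /* Leaf 0x00000001: SSE2 is reported in EDX, so the ECX argument is 0. */
    return iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_SSE2, 0);
}
#endif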
3603
3604/** @} */
3605
3606
3607/** @name FPU access and helpers.
3608 *
3609 * @{
3610 */
3611
3612
3613/**
3614 * Hook for preparing to use the host FPU.
3615 *
3616 * This is necessary in ring-0 and raw-mode context.
3617 *
3618 * @param pIemCpu The IEM per CPU data.
3619 */
3620DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3621{
3622#ifdef IN_RING3
3623 NOREF(pIemCpu);
3624#else
3625/** @todo RZ: FIXME */
3626//# error "Implement me"
3627#endif
3628}
3629
3630
3631/**
3632 * Stores a QNaN value into a FPU register.
3633 *
3634 * @param pReg Pointer to the register.
3635 */
3636DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3637{
3638 pReg->au32[0] = UINT32_C(0x00000000);
3639 pReg->au32[1] = UINT32_C(0xc0000000);
3640 pReg->au16[4] = UINT16_C(0xffff);
3641}
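/*
 * Note (illustration only): the bit pattern written above is the x87 "real
 * indefinite" QNaN: sign=1, exponent=0x7fff, mantissa=0xC000000000000000,
 * i.e. the 80-bit value 0xFFFF_C000000000000000.
 */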
3642
3643
3644/**
3645 * Updates the FOP, FPU.CS and FPUIP registers.
3646 *
3647 * @param pIemCpu The IEM per CPU data.
3648 * @param pCtx The CPU context.
3649 */
3650DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3651{
3652 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3653 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3654 /** @todo FPU.CS and FPUIP need to be kept separately. */
3655 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3656 {
3657 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
3658 * happens in real mode here based on the fnsave and fnstenv images. */
3659 pCtx->fpu.CS = 0;
3660 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3661 }
3662 else
3663 {
3664 pCtx->fpu.CS = pCtx->cs.Sel;
3665 pCtx->fpu.FPUIP = pCtx->rip;
3666 }
3667}
3668
3669
3670/**
3671 * Updates the FPU.DS and FPUDP registers.
3672 *
3673 * @param pIemCpu The IEM per CPU data.
3674 * @param pCtx The CPU context.
3675 * @param iEffSeg The effective segment register.
3676 * @param GCPtrEff The effective address relative to @a iEffSeg.
3677 */
3678DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3679{
3680 RTSEL sel;
3681 switch (iEffSeg)
3682 {
3683 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3684 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3685 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3686 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3687 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3688 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3689 default:
3690 AssertMsgFailed(("%d\n", iEffSeg));
3691 sel = pCtx->ds.Sel;
3692 }
3693 /** @todo FPU.DS and FPUDP need to be kept separately. */
3694 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3695 {
3696 pCtx->fpu.DS = 0;
3697 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3698 }
3699 else
3700 {
3701 pCtx->fpu.DS = sel;
3702 pCtx->fpu.FPUDP = GCPtrEff;
3703 }
3704}
3705
3706
3707/**
3708 * Rotates the stack registers in the push direction.
3709 *
3710 * @param pCtx The CPU context.
3711 * @remarks This is a complete waste of time, but fxsave stores the registers in
3712 * stack order.
3713 */
3714DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3715{
3716 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3717 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3718 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3719 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3720 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3721 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3722 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3723 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3724 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3725}
3726
3727
3728/**
3729 * Rotates the stack registers in the pop direction.
3730 *
3731 * @param pCtx The CPU context.
3732 * @remarks This is a complete waste of time, but fxsave stores the registers in
3733 * stack order.
3734 */
3735DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3736{
3737 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3738 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3739 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3740 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3741 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3742 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3743 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3744 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3745 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3746}
3747
3748
3749/**
3750 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3751 * exception prevents it.
3752 *
3753 * @param pIemCpu The IEM per CPU data.
3754 * @param pResult The FPU operation result to push.
3755 * @param pCtx The CPU context.
3756 */
3757static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3758{
3759 /* Update FSW and bail if there are pending exceptions afterwards. */
3760 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3761 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3762 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3763 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3764 {
3765 pCtx->fpu.FSW = fFsw;
3766 return;
3767 }
3768
3769 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3770 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3771 {
3772 /* All is fine, push the actual value. */
3773 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3774 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3775 }
3776 else if (pCtx->fpu.FCW & X86_FCW_IM)
3777 {
3778 /* Masked stack overflow, push QNaN. */
3779 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3780 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3781 }
3782 else
3783 {
3784 /* Raise stack overflow, don't push anything. */
3785 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3786 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3787 return;
3788 }
3789
3790 fFsw &= ~X86_FSW_TOP_MASK;
3791 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3792 pCtx->fpu.FSW = fFsw;
3793
3794 iemFpuRotateStackPush(pCtx);
3795}
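/*
 * Note on the ordering above (illustration only): the new value is written to
 * aRegs[7] before iemFpuRotateStackPush is called; the rotation then moves
 * that entry to aRegs[0], so it ends up as the new ST(0) matching the
 * decremented TOP.
 */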
3796
3797
3798/**
3799 * Stores a result in a FPU register and updates the FSW and FTW.
3800 *
3801 * @param pIemCpu The IEM per CPU data.
3802 * @param pResult The result to store.
3803 * @param iStReg Which FPU register to store it in.
3804 * @param pCtx The CPU context.
3805 */
3806static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3807{
3808 Assert(iStReg < 8);
3809 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3810 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3811 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3812 pCtx->fpu.FTW |= RT_BIT(iReg);
3813 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3814}
3815
3816
3817/**
3818 * Only updates the FPU status word (FSW) with the result of the current
3819 * instruction.
3820 *
3821 * @param pCtx The CPU context.
3822 * @param u16FSW The FSW output of the current instruction.
3823 */
3824static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3825{
3826 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3827 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3828}
3829
3830
3831/**
3832 * Pops one item off the FPU stack if no pending exception prevents it.
3833 *
3834 * @param pCtx The CPU context.
3835 */
3836static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3837{
3838 /* Check pending exceptions. */
3839 uint16_t uFSW = pCtx->fpu.FSW;
3840 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3841 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3842 return;
3843
3844 /* TOP++, i.e. pop one item. */
3845 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3846 uFSW &= ~X86_FSW_TOP_MASK;
3847 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3848 pCtx->fpu.FSW = uFSW;
3849
3850 /* Mark the previous ST0 as empty. */
3851 iOldTop >>= X86_FSW_TOP_SHIFT;
3852 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3853
3854 /* Rotate the registers. */
3855 iemFpuRotateStackPop(pCtx);
3856}
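/*
 * Worked example (illustration only): with TOP=3 in FSW bits 11-13, a pop
 * sets TOP to (3 + 1) & 7 = 4; the code above gets there by adding
 * 9 << X86_FSW_TOP_SHIFT and masking, since 9 & 7 == 1.  A push goes the
 * other way, (3 + 7) & 7 = 2, i.e. a decrement modulo 8.
 */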
3857
3858
3859/**
3860 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3861 *
3862 * @param pIemCpu The IEM per CPU data.
3863 * @param pResult The FPU operation result to push.
3864 */
3865static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3866{
3867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3868 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3869 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3870}
3871
3872
3873/**
3874 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3875 * and sets FPUDP and FPUDS.
3876 *
3877 * @param pIemCpu The IEM per CPU data.
3878 * @param pResult The FPU operation result to push.
3879 * @param iEffSeg The effective segment register.
3880 * @param GCPtrEff The effective address relative to @a iEffSeg.
3881 */
3882static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3883{
3884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3885 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3886 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3887 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3888}
3889
3890
3891/**
3892 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
3893 * unless a pending exception prevents it.
3894 *
3895 * @param pIemCpu The IEM per CPU data.
3896 * @param pResult The FPU operation result to store and push.
3897 */
3898static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3899{
3900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3901 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3902
3903 /* Update FSW and bail if there are pending exceptions afterwards. */
3904 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3905 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3906 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3907 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3908 {
3909 pCtx->fpu.FSW = fFsw;
3910 return;
3911 }
3912
3913 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3914 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3915 {
3916 /* All is fine, push the actual value. */
3917 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3918 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3919 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3920 }
3921 else if (pCtx->fpu.FCW & X86_FCW_IM)
3922 {
3923 /* Masked stack overflow, push QNaN. */
3924 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3925 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3926 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3927 }
3928 else
3929 {
3930 /* Raise stack overflow, don't push anything. */
3931 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3932 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3933 return;
3934 }
3935
3936 fFsw &= ~X86_FSW_TOP_MASK;
3937 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3938 pCtx->fpu.FSW = fFsw;
3939
3940 iemFpuRotateStackPush(pCtx);
3941}
3942
3943
3944/**
3945 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3946 * FOP.
3947 *
3948 * @param pIemCpu The IEM per CPU data.
3949 * @param pResult The result to store.
3950 * @param iStReg Which FPU register to store it in.
3952 */
3953static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3954{
3955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3956 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3957 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3958}
3959
3960
3961/**
3962 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3963 * FOP, and then pops the stack.
3964 *
3965 * @param pIemCpu The IEM per CPU data.
3966 * @param pResult The result to store.
3967 * @param iStReg Which FPU register to store it in.
3969 */
3970static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3971{
3972 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3973 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3974 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3975 iemFpuMaybePopOne(pCtx);
3976}
3977
3978
3979/**
3980 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3981 * FPUDP, and FPUDS.
3982 *
3983 * @param pIemCpu The IEM per CPU data.
3984 * @param pResult The result to store.
3985 * @param iStReg Which FPU register to store it in.
3987 * @param iEffSeg The effective memory operand selector register.
3988 * @param GCPtrEff The effective memory operand offset.
3989 */
3990static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3991{
3992 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3993 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3994 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3995 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3996}
3997
3998
3999/**
4000 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4001 * FPUDP, and FPUDS, and then pops the stack.
4002 *
4003 * @param pIemCpu The IEM per CPU data.
4004 * @param pResult The result to store.
4005 * @param iStReg Which FPU register to store it in.
4007 * @param iEffSeg The effective memory operand selector register.
4008 * @param GCPtrEff The effective memory operand offset.
4009 */
4010static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4011 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4012{
4013 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4014 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4015 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4016 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4017 iemFpuMaybePopOne(pCtx);
4018}
4019
4020
4021/**
4022 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4023 *
4024 * @param pIemCpu The IEM per CPU data.
4025 */
4026static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4027{
4028 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4029}
4030
4031
4032/**
4033 * Marks the specified stack register as free (for FFREE).
4034 *
4035 * @param pIemCpu The IEM per CPU data.
4036 * @param iStReg The register to free.
4037 */
4038static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4039{
4040 Assert(iStReg < 8);
4041 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4042 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4043 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4044}
4045
4046
4047/**
4048 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4049 *
4050 * @param pIemCpu The IEM per CPU data.
4051 */
4052static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4053{
4054 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4055 uint16_t uFsw = pCtx->fpu.FSW;
4056 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4057 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4058 uFsw &= ~X86_FSW_TOP_MASK;
4059 uFsw |= uTop;
4060 pCtx->fpu.FSW = uFsw;
4061}
4062
4063
4064/**
4065 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4066 *
4067 * @param pIemCpu The IEM per CPU data.
4068 */
4069static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4070{
4071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4072 uint16_t uFsw = pCtx->fpu.FSW;
4073 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4074 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4075 uFsw &= ~X86_FSW_TOP_MASK;
4076 uFsw |= uTop;
4077 pCtx->fpu.FSW = uFsw;
4078}
4079
4080
4081/**
4082 * Updates the FSW, FOP, FPUIP, and FPUCS.
4083 *
4084 * @param pIemCpu The IEM per CPU data.
4085 * @param u16FSW The FSW from the current instruction.
4086 */
4087static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4088{
4089 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4090 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4091 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4092}
4093
4094
4095/**
4096 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4097 *
4098 * @param pIemCpu The IEM per CPU data.
4099 * @param u16FSW The FSW from the current instruction.
4100 */
4101static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4102{
4103 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4104 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4105 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4106 iemFpuMaybePopOne(pCtx);
4107}
4108
4109
4110/**
4111 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4112 *
4113 * @param pIemCpu The IEM per CPU data.
4114 * @param u16FSW The FSW from the current instruction.
4115 * @param iEffSeg The effective memory operand selector register.
4116 * @param GCPtrEff The effective memory operand offset.
4117 */
4118static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4119{
4120 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4121 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4122 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4123 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4124}
4125
4126
4127/**
4128 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4129 *
4130 * @param pIemCpu The IEM per CPU data.
4131 * @param u16FSW The FSW from the current instruction.
4132 */
4133static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4134{
4135 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4136 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4137 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4138 iemFpuMaybePopOne(pCtx);
4139 iemFpuMaybePopOne(pCtx);
4140}
4141
4142
4143/**
4144 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4145 *
4146 * @param pIemCpu The IEM per CPU data.
4147 * @param u16FSW The FSW from the current instruction.
4148 * @param iEffSeg The effective memory operand selector register.
4149 * @param GCPtrEff The effective memory operand offset.
4150 */
4151static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4152{
4153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4154 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4155 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4156 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4157 iemFpuMaybePopOne(pCtx);
4158}
4159
4160
4161/**
4162 * Worker routine for raising an FPU stack underflow exception.
4163 *
4164 * @param pIemCpu The IEM per CPU data.
4165 * @param iStReg The stack register being accessed.
4166 * @param pCtx The CPU context.
4167 */
4168static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4169{
4170 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4171 if (pCtx->fpu.FCW & X86_FCW_IM)
4172 {
4173 /* Masked underflow. */
4174 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4175 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4176 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4177 if (iStReg != UINT8_MAX)
4178 {
4179 pCtx->fpu.FTW |= RT_BIT(iReg);
4180 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4181 }
4182 }
4183 else
4184 {
4185 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4186 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4187 }
4188}
4189
4190
4191/**
4192 * Raises a FPU stack underflow exception.
4193 *
4194 * @param pIemCpu The IEM per CPU data.
4195 * @param iStReg The destination register that should be loaded
4196 * with QNaN if \#IS is not masked. Specify
4197 * UINT8_MAX if none (like for fcom).
4198 */
4199DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4200{
4201 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4202 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4203 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4204}
4205
4206
4207DECL_NO_INLINE(static, void)
4208iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4209{
4210 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4211 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4212 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4213 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4214}
4215
4216
4217DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4218{
4219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4220 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4221 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4222 iemFpuMaybePopOne(pCtx);
4223}
4224
4225
4226DECL_NO_INLINE(static, void)
4227iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4228{
4229 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4230 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4231 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4232 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4233 iemFpuMaybePopOne(pCtx);
4234}
4235
4236
4237DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4238{
4239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4240 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4241 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4242 iemFpuMaybePopOne(pCtx);
4243 iemFpuMaybePopOne(pCtx);
4244}
4245
4246
4247DECL_NO_INLINE(static, void)
4248iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4249{
4250 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4251 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4252
4253 if (pCtx->fpu.FCW & X86_FCW_IM)
4254 {
4255 /* Masked underflow - push QNaN. */
4256 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4257 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4258 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4259 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4260 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4261 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4262 iemFpuRotateStackPush(pCtx);
4263 }
4264 else
4265 {
4266 /* Exception pending - don't change TOP or the register stack. */
4267 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4268 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4269 }
4270}
4271
4272
4273DECL_NO_INLINE(static, void)
4274iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4275{
4276 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4277 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4278
4279 if (pCtx->fpu.FCW & X86_FCW_IM)
4280 {
4281 /* Masked underflow - push QNaN. */
4282 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4283 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4284 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4285 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4286 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4287 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4288 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4289 iemFpuRotateStackPush(pCtx);
4290 }
4291 else
4292 {
4293 /* Exception pending - don't change TOP or the register stack. */
4294 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4295 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4296 }
4297}
4298
4299
4300/**
4301 * Worker routine for raising an FPU stack overflow exception on a push.
4302 *
4303 * @param pIemCpu The IEM per CPU data.
4304 * @param pCtx The CPU context.
4305 */
4306static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4307{
4308 if (pCtx->fpu.FCW & X86_FCW_IM)
4309 {
4310 /* Masked overflow. */
4311 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4312 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4313 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4314 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4315 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4316 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4317 iemFpuRotateStackPush(pCtx);
4318 }
4319 else
4320 {
4321 /* Exception pending - don't change TOP or the register stack. */
4322 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4323 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4324 }
4325}
4326
4327
4328/**
4329 * Raises a FPU stack overflow exception on a push.
4330 *
4331 * @param pIemCpu The IEM per CPU data.
4332 */
4333DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4334{
4335 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4336 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4337 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4338}
4339
4340
4341/**
4342 * Raises a FPU stack overflow exception on a push with a memory operand.
4343 *
4344 * @param pIemCpu The IEM per CPU data.
4345 * @param iEffSeg The effective memory operand selector register.
4346 * @param GCPtrEff The effective memory operand offset.
4347 */
4348DECL_NO_INLINE(static, void)
4349iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4350{
4351 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4352 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4353 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4354 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4355}
4356
4357
4358static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4359{
4360 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4361 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4362 if (pCtx->fpu.FTW & RT_BIT(iReg))
4363 return VINF_SUCCESS;
4364 return VERR_NOT_FOUND;
4365}
4366
4367
4368static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4369{
4370 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4371 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4372 if (pCtx->fpu.FTW & RT_BIT(iReg))
4373 {
4374 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4375 return VINF_SUCCESS;
4376 }
4377 return VERR_NOT_FOUND;
4378}
4379
4380
4381static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4382 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4383{
4384 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4385 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4386 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4387 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4388 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4389 {
4390 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4391 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4392 return VINF_SUCCESS;
4393 }
4394 return VERR_NOT_FOUND;
4395}
4396
4397
4398static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4399{
4400 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4401 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4402 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4403 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4404 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4405 {
4406 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4407 return VINF_SUCCESS;
4408 }
4409 return VERR_NOT_FOUND;
4410}
4411
4412
4413/**
4414 * Updates the FPU exception status after FCW is changed.
4415 *
4416 * @param pCtx The CPU context.
4417 */
4418static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4419{
4420 uint16_t u16Fsw = pCtx->fpu.FSW;
4421 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4422 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4423 else
4424 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4425 pCtx->fpu.FSW = u16Fsw;
4426}
4427
4428
4429/**
4430 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4431 *
4432 * @returns The full FTW.
4433 * @param pCtx The CPU state.
4434 */
4435static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4436{
4437 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4438 uint16_t u16Ftw = 0;
4439 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4440 for (unsigned iSt = 0; iSt < 8; iSt++)
4441 {
4442 unsigned const iReg = (iSt + iTop) & 7;
4443 if (!(u8Ftw & RT_BIT(iReg)))
4444 u16Ftw |= 3 << (iReg * 2); /* empty */
4445 else
4446 {
4447 uint16_t uTag;
4448 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4449 if (pr80Reg->s.uExponent == 0x7fff)
4450 uTag = 2; /* Exponent is all 1's => Special. */
4451 else if (pr80Reg->s.uExponent == 0x0000)
4452 {
4453 if (pr80Reg->s.u64Mantissa == 0x0000)
4454 uTag = 1; /* All bits are zero => Zero. */
4455 else
4456 uTag = 2; /* Must be special. */
4457 }
4458 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4459 uTag = 0; /* Valid. */
4460 else
4461 uTag = 2; /* Must be special. */
4462
4463 u16Ftw |= uTag << (iReg * 2);
4464 }
4465 }
4466
4467 return u16Ftw;
4468}
4469
4470
4471/**
4472 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4473 *
4474 * @returns The compressed FTW.
4475 * @param u16FullFtw The full FTW to convert.
4476 */
4477static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4478{
4479 uint8_t u8Ftw = 0;
4480 for (unsigned i = 0; i < 8; i++)
4481 {
4482 if ((u16FullFtw & 3) != 3 /*empty*/)
4483 u8Ftw |= RT_BIT(i);
4484 u16FullFtw >>= 2;
4485 }
4486
4487 return u8Ftw;
4488}
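/*
 * Worked example (illustration only): with TOP=5 and only physical register 5
 * in use holding a normal value, the compressed FTW is 0x20 (bit 5 set) and
 * iemFpuCalcFullFtw produces 0xf3ff: tag 00 (valid) for register 5 and 11
 * (empty) for the other seven.  iemFpuCompressFtw(0xf3ff) gives back 0x20.
 */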
4489
4490/** @} */
4491
4492
4493/** @name Memory access.
4494 *
4495 * @{
4496 */
4497
4498
4499/**
4500 * Updates the IEMCPU::cbWritten counter if applicable.
4501 *
4502 * @param pIemCpu The IEM per CPU data.
4503 * @param fAccess The access being accounted for.
4504 * @param cbMem The access size.
4505 */
4506DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4507{
4508 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4509 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4510 pIemCpu->cbWritten += (uint32_t)cbMem;
4511}
4512
4513
4514/**
4515 * Checks if the given segment can be written to, raising the appropriate
4516 * exception if not.
4517 *
4518 * @returns VBox strict status code.
4519 *
4520 * @param pIemCpu The IEM per CPU data.
4521 * @param pHid Pointer to the hidden register.
4522 * @param iSegReg The register number.
4523 */
4524static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4525{
4526 if (!pHid->Attr.n.u1Present)
4527 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4528
4529 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4530 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4531 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4532 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4533
4534 /** @todo DPL/RPL/CPL? */
4535
4536 return VINF_SUCCESS;
4537}
4538
4539
4540/**
4541 * Checks if the given segment can be read from, raising the appropriate
4542 * exception if not.
4543 *
4544 * @returns VBox strict status code.
4545 *
4546 * @param pIemCpu The IEM per CPU data.
4547 * @param pHid Pointer to the hidden register.
4548 * @param iSegReg The register number.
4549 */
4550static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4551{
4552 if (!pHid->Attr.n.u1Present)
4553 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4554
4555 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4556 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4557 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4558
4559 /** @todo DPL/RPL/CPL? */
4560
4561 return VINF_SUCCESS;
4562}
4563
4564
4565/**
4566 * Applies the segment limit, base and attributes.
4567 *
4568 * This may raise a \#GP or \#SS.
4569 *
4570 * @returns VBox strict status code.
4571 *
4572 * @param pIemCpu The IEM per CPU data.
4573 * @param fAccess The kind of access which is being performed.
4574 * @param iSegReg The index of the segment register to apply.
4575 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4576 * TSS, ++).
 * @param cbMem The size of the memory access.
4577 * @param pGCPtrMem Pointer to the guest memory address to apply
4578 * segmentation to. Input and output parameter.
4579 */
4580static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4581 size_t cbMem, PRTGCPTR pGCPtrMem)
4582{
4583 if (iSegReg == UINT8_MAX)
4584 return VINF_SUCCESS;
4585
4586 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4587 switch (pIemCpu->enmCpuMode)
4588 {
4589 case IEMMODE_16BIT:
4590 case IEMMODE_32BIT:
4591 {
4592 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4593 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4594
4595 Assert(pSel->Attr.n.u1Present);
4596 Assert(pSel->Attr.n.u1DescType);
4597 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4598 {
4599 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4600 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4601 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4602
4603 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4604 {
4605 /** @todo CPL check. */
4606 }
4607
4608 /*
4609 * There are two kinds of data selectors, normal and expand down.
4610 */
4611 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4612 {
4613 if ( GCPtrFirst32 > pSel->u32Limit
4614 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4615 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4616
4617 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4618 }
4619 else
4620 {
4621 /** @todo implement expand down segments. */
4622 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4623 }
4624 }
4625 else
4626 {
4627
4628 /*
4629 * Code selectors can usually be used to read through them; writing is
4630 * only permitted in real and V8086 mode.
4631 */
4632 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4633 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4634 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4635 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4636 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4637
4638 if ( GCPtrFirst32 > pSel->u32Limit
4639 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4640 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4641
4642 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4643 {
4644 /** @todo CPL check. */
4645 }
4646
4647 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4648 }
4649 return VINF_SUCCESS;
4650 }
4651
4652 case IEMMODE_64BIT:
4653 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4654 *pGCPtrMem += pSel->u64Base;
4655 return VINF_SUCCESS;
4656
4657 default:
4658 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4659 }
4660}
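/*
 * Example (illustration only): a 4 byte data read at DS:0x0100 with
 * DS.base=0x1000 and DS.limit=0xffff (expand-up) checks GCPtrFirst32=0x0100
 * and GCPtrLast32=0x0103 against the limit and then returns the linear
 * address 0x0100 + 0x1000 = 0x1100 in *pGCPtrMem.
 */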
4661
4662
4663/**
4664 * Translates a virtual address to a physical address and checks if we
4665 * can access the page as specified.
4666 *
4667 * @param pIemCpu The IEM per CPU data.
4668 * @param GCPtrMem The virtual address.
4669 * @param fAccess The intended access.
4670 * @param pGCPhysMem Where to return the physical address.
4671 */
4672static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4673 PRTGCPHYS pGCPhysMem)
4674{
4675 /** @todo Need a different PGM interface here. We're currently using
4676 * generic / REM interfaces. this won't cut it for R0 & RC. */
4677 RTGCPHYS GCPhys;
4678 uint64_t fFlags;
4679 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4680 if (RT_FAILURE(rc))
4681 {
4682 /** @todo Check unassigned memory in unpaged mode. */
4683 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4684 *pGCPhysMem = NIL_RTGCPHYS;
4685 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4686 }
4687
4688 /* If the page is writable and does not have the no-exec bit set, all
4689 access is allowed. Otherwise we'll have to check more carefully... */
4690 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4691 {
4692 /* Write to read only memory? */
4693 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4694 && !(fFlags & X86_PTE_RW)
4695 && ( pIemCpu->uCpl != 0
4696 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4697 {
4698 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4699 *pGCPhysMem = NIL_RTGCPHYS;
4700 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4701 }
4702
4703 /* Kernel memory accessed by userland? */
4704 if ( !(fFlags & X86_PTE_US)
4705 && pIemCpu->uCpl == 3
4706 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4707 {
4708 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4709 *pGCPhysMem = NIL_RTGCPHYS;
4710 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4711 }
4712
4713 /* Executing non-executable memory? */
4714 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4715 && (fFlags & X86_PTE_PAE_NX)
4716 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4717 {
4718 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4719 *pGCPhysMem = NIL_RTGCPHYS;
4720 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4721 VERR_ACCESS_DENIED);
4722 }
4723 }
4724
4725 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4726 *pGCPhysMem = GCPhys;
4727 return VINF_SUCCESS;
4728}
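/*
 * Example (illustration only): with 4 KiB pages, a guest-virtual address of
 * 0x00401234 whose page translates to guest-physical 0x00076000 ends up as
 * *pGCPhysMem = 0x00076000 | (0x00401234 & 0xfff) = 0x00076234, provided the
 * access checks above pass.
 */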
4729
4730
4731
4732/**
4733 * Maps a physical page.
4734 *
4735 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4736 * @param pIemCpu The IEM per CPU data.
4737 * @param GCPhysMem The physical address.
4738 * @param fAccess The intended access.
4739 * @param ppvMem Where to return the mapping address.
4740 * @param pLock The PGM lock.
4741 */
4742static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
4743{
4744#ifdef IEM_VERIFICATION_MODE_FULL
4745 /* Force the alternative path so we can ignore writes. */
4746 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4747 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4748#endif
4749#ifdef IEM_LOG_MEMORY_WRITES
4750 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4751 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4752#endif
4753#ifdef IEM_VERIFICATION_MODE_MINIMAL
4754 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4755#endif
4756
4757 /** @todo This API may require some improving later. A private deal with PGM
4758 * regarding locking and unlocking needs to be struck. A couple of TLBs
4759 * living in PGM, but with publicly accessible inlined access methods
4760 * could perhaps be an even better solution. */
4761 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4762 GCPhysMem,
4763 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4764 pIemCpu->fBypassHandlers,
4765 ppvMem,
4766 pLock);
4767 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4768 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4769 return rc;
4770}
4771
4772
4773/**
4774 * Unmaps a page previously mapped by iemMemPageMap.
4775 *
4776 * @param pIemCpu The IEM per CPU data.
4777 * @param GCPhysMem The physical address.
4778 * @param fAccess The intended access.
4779 * @param pvMem What iemMemPageMap returned.
4780 * @param pLock The PGM lock.
4781 */
4782DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
4783{
4784 NOREF(pIemCpu);
4785 NOREF(GCPhysMem);
4786 NOREF(fAccess);
4787 NOREF(pvMem);
4788 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
4789}
4790
4791
4792/**
4793 * Looks up a memory mapping entry.
4794 *
4795 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
4796 * @param pIemCpu The IEM per CPU data.
4797 * @param pvMem The memory address.
4798 * @param fAccess The access flags to look up.
4799 */
4800DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4801{
4802 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4803 if ( pIemCpu->aMemMappings[0].pv == pvMem
4804 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4805 return 0;
4806 if ( pIemCpu->aMemMappings[1].pv == pvMem
4807 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4808 return 1;
4809 if ( pIemCpu->aMemMappings[2].pv == pvMem
4810 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4811 return 2;
4812 return VERR_NOT_FOUND;
4813}
4814
4815
4816/**
4817 * Finds a free memmap entry when using iNextMapping doesn't work.
4818 *
4819 * @returns Memory mapping index, 1024 on failure.
4820 * @param pIemCpu The IEM per CPU data.
4821 */
4822static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4823{
4824 /*
4825 * The easy case.
4826 */
4827 if (pIemCpu->cActiveMappings == 0)
4828 {
4829 pIemCpu->iNextMapping = 1;
4830 return 0;
4831 }
4832
4833 /* There should be enough mappings for all instructions. */
4834 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4835
4836 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4837 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4838 return i;
4839
4840 AssertFailedReturn(1024);
4841}
4842
4843
4844/**
4845 * Commits a bounce buffer that needs writing back and unmaps it.
4846 *
4847 * @returns Strict VBox status code.
4848 * @param pIemCpu The IEM per CPU data.
4849 * @param iMemMap The index of the buffer to commit.
4850 */
4851static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4852{
4853 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4854 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4855
4856 /*
4857 * Do the writing.
4858 */
4859 int rc;
4860#ifndef IEM_VERIFICATION_MODE_MINIMAL
4861 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4862 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4863 {
4864 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4865 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4866 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4867 if (!pIemCpu->fBypassHandlers)
4868 {
4869 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4870 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4871 pbBuf,
4872 cbFirst);
4873 if (cbSecond && rc == VINF_SUCCESS)
4874 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4875 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4876 pbBuf + cbFirst,
4877 cbSecond);
4878 }
4879 else
4880 {
4881 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4882 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4883 pbBuf,
4884 cbFirst);
4885 if (cbSecond && rc == VINF_SUCCESS)
4886 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4887 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4888 pbBuf + cbFirst,
4889 cbSecond);
4890 }
4891 if (rc != VINF_SUCCESS)
4892 {
4893 /** @todo status code handling */
4894 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
4895 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
4896 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
4897 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
4898 }
4899 }
4900 else
4901#endif
4902 rc = VINF_SUCCESS;
4903
4904#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
4905 /*
4906 * Record the write(s).
4907 */
4908 if (!pIemCpu->fNoRem)
4909 {
4910 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4911 if (pEvtRec)
4912 {
4913 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4914 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4915 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4916 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4917 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4918 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4919 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4920 }
4921 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4922 {
4923 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4924 if (pEvtRec)
4925 {
4926 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4927 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4928 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4929 memcpy(pEvtRec->u.RamWrite.ab,
4930 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4931 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4932 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4933 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4934 }
4935 }
4936 }
4937#endif
4938#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
4939 if (rc == VINF_SUCCESS)
4940 {
4941 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4942 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
4943 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4944 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4945 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
4946 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
4947
4948 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4949 g_cbIemWrote = cbWrote;
4950 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
4951 }
4952#endif
4953
4954 /*
4955 * Free the mapping entry.
4956 */
4957 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4958 Assert(pIemCpu->cActiveMappings != 0);
4959 pIemCpu->cActiveMappings--;
4960 return rc;
4961}
4962
4963
4964/**
4965 * iemMemMap worker that deals with a request crossing pages.
4966 */
4967static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4968 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4969{
4970 /*
4971 * Do the address translations.
4972 */
4973 RTGCPHYS GCPhysFirst;
4974 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4975 if (rcStrict != VINF_SUCCESS)
4976 return rcStrict;
4977
4978/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
4979 * last byte. */
4980 RTGCPHYS GCPhysSecond;
4981 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4982 if (rcStrict != VINF_SUCCESS)
4983 return rcStrict;
4984 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4985
4986 /*
4987 * Read in the current memory content if it's a read, execute or partial
4988 * write access.
4989 */
4990 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4991 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4992 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4993
4994 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4995 {
4996 int rc;
4997 if (!pIemCpu->fBypassHandlers)
4998 {
4999 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
5000 if (rc != VINF_SUCCESS)
5001 {
5002 /** @todo status code handling */
5003 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5004 return rc;
5005 }
5006 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5007 if (rc != VINF_SUCCESS)
5008 {
5009 /** @todo status code handling */
5010 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5011 return rc;
5012 }
5013 }
5014 else
5015 {
5016 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5017 if (rc != VINF_SUCCESS)
5018 {
5019 /** @todo status code handling */
5020 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5021 return rc;
5022 }
5023 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5024 if (rc != VINF_SUCCESS)
5025 {
5026 /** @todo status code handling */
5027 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5028 return rc;
5029 }
5030 }
5031
5032#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5033 if ( !pIemCpu->fNoRem
5034 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5035 {
5036 /*
5037 * Record the reads.
5038 */
5039 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5040 if (pEvtRec)
5041 {
5042 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5043 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5044 pEvtRec->u.RamRead.cb = cbFirstPage;
5045 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5046 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5047 }
5048 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5049 if (pEvtRec)
5050 {
5051 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5052 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5053 pEvtRec->u.RamRead.cb = cbSecondPage;
5054 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5055 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5056 }
5057 }
5058#endif
5059 }
5060#ifdef VBOX_STRICT
5061 else
5062 memset(pbBuf, 0xcc, cbMem);
5063#endif
5064#ifdef VBOX_STRICT
5065 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5066 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5067#endif
5068
5069 /*
5070 * Commit the bounce buffer entry.
5071 */
5072 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5073 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5074 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5075 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5076 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5077 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5078 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5079 pIemCpu->cActiveMappings++;
5080
5081 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5082 *ppvMem = pbBuf;
5083 return VINF_SUCCESS;
5084}
5085
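/* Worked example (added for exposition): a 4-byte access whose page offset is
 * 0xffe straddles the page boundary, so with PAGE_SIZE = 0x1000 the split
 * above becomes:
 *     cbFirstPage  = PAGE_SIZE - 0xffe     = 2  (bytes from the first page)
 *     cbSecondPage = cbMem - cbFirstPage   = 2  (bytes from the second page)
 * Both halves are gathered into the bounce buffer so the caller still sees one
 * contiguous cbMem-byte mapping; the write-back happens on commit.
 */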
5086
5087/**
5088 * iemMemMap worker that deals with iemMemPageMap failures.
5089 */
5090static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5091 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5092{
5093 /*
5094 * Filter out conditions we can handle and the ones which shouldn't happen.
5095 */
5096 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5097 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5098 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5099 {
5100 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5101 return rcMap;
5102 }
5103 pIemCpu->cPotentialExits++;
5104
5105 /*
5106 * Read in the current memory content if it's a read, execute or partial
5107 * write access.
5108 */
5109 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5110 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5111 {
5112 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5113 memset(pbBuf, 0xff, cbMem);
5114 else
5115 {
5116 int rc;
5117 if (!pIemCpu->fBypassHandlers)
5118 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5119 else
5120 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5121 if (rc != VINF_SUCCESS)
5122 {
5123 /** @todo status code handling */
5124 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5125 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5126 return rc;
5127 }
5128 }
5129
5130#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5131 if ( !pIemCpu->fNoRem
5132 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5133 {
5134 /*
5135 * Record the read.
5136 */
5137 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5138 if (pEvtRec)
5139 {
5140 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5141 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5142 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5143 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5144 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5145 }
5146 }
5147#endif
5148 }
5149#ifdef VBOX_STRICT
5150 else
5151 memset(pbBuf, 0xcc, cbMem);
5152#endif
5153#ifdef VBOX_STRICT
5154 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5155 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5156#endif
5157
5158 /*
5159 * Commit the bounce buffer entry.
5160 */
5161 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5162 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5163 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5164 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5165 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5166 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5167 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5168 pIemCpu->cActiveMappings++;
5169
5170 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5171 *ppvMem = pbBuf;
5172 return VINF_SUCCESS;
5173}
5174
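/* Note (added for exposition): unlike the page-crossing path above, this path
 * handles a single page that PGM could not hand us directly (access handlers,
 * write monitoring or unassigned memory). Unassigned physical memory reads as
 * all 0xff bytes, and for writes the actual write-back is deferred until the
 * bounce buffer is committed in iemMemBounceBufferCommitAndUnmap.
 */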
5175
5176
5177/**
5178 * Maps the specified guest memory for the given kind of access.
5179 *
5180 * This may be using bounce buffering of the memory if it's crossing a page
5181 * boundary or if there is an access handler installed for any of it. Because
5182 * of lock prefix guarantees, we're in for some extra clutter when this
5183 * happens.
5184 *
5185 * This may raise a \#GP, \#SS, \#PF or \#AC.
5186 *
5187 * @returns VBox strict status code.
5188 *
5189 * @param pIemCpu The IEM per CPU data.
5190 * @param ppvMem Where to return the pointer to the mapped
5191 * memory.
5192 * @param cbMem The number of bytes to map. This is usually 1,
5193 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5194 * string operations it can be up to a page.
5195 * @param iSegReg The index of the segment register to use for
5196 * this access. The base and limits are checked.
5197 * Use UINT8_MAX to indicate that no segmentation
5198 * is required (for IDT, GDT and LDT accesses).
5199 * @param GCPtrMem The address of the guest memory.
5200 * @param fAccess How the memory is being accessed. The
5201 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5202 * how to map the memory, while the
5203 * IEM_ACCESS_WHAT_XXX bit is used when raising
5204 * exceptions.
5205 */
5206static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5207{
5208 /*
5209 * Check the input and figure out which mapping entry to use.
5210 */
5211 Assert(cbMem <= 32 || cbMem == 512 || cbMem == 108 || cbMem == 94);
5212 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5213
5214 unsigned iMemMap = pIemCpu->iNextMapping;
5215 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5216 {
5217 iMemMap = iemMemMapFindFree(pIemCpu);
5218 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5219 }
5220
5221 /*
5222 * Map the memory, checking that we can actually access it. If something
5223 * slightly complicated happens, fall back on bounce buffering.
5224 */
5225 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5226 if (rcStrict != VINF_SUCCESS)
5227 return rcStrict;
5228
5229 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5230 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5231
5232 RTGCPHYS GCPhysFirst;
5233 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5234 if (rcStrict != VINF_SUCCESS)
5235 return rcStrict;
5236
5237 void *pvMem;
5238 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5239 if (rcStrict != VINF_SUCCESS)
5240 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5241
5242 /*
5243 * Fill in the mapping table entry.
5244 */
5245 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5246 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5247 pIemCpu->iNextMapping = iMemMap + 1;
5248 pIemCpu->cActiveMappings++;
5249
5250 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5251 *ppvMem = pvMem;
5252 return VINF_SUCCESS;
5253}
5254
5255
5256/**
5257 * Commits the guest memory if bounce buffered and unmaps it.
5258 *
5259 * @returns Strict VBox status code.
5260 * @param pIemCpu The IEM per CPU data.
5261 * @param pvMem The mapping.
5262 * @param fAccess The kind of access.
5263 */
5264static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5265{
5266 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5267 AssertReturn(iMemMap >= 0, iMemMap);
5268
5269 /* If it's bounce buffered, we may need to write back the buffer. */
5270 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5271 {
5272 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5273 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5274 }
5275 /* Otherwise unlock it. */
5276 else
5277 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5278
5279 /* Free the entry. */
5280 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5281 Assert(pIemCpu->cActiveMappings != 0);
5282 pIemCpu->cActiveMappings--;
5283 return VINF_SUCCESS;
5284}
5285
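/* Usage sketch (added for exposition): the data fetch/store helpers below all
 * follow the same map / access / commit pattern, e.g. for a dword read:
 *
 *     uint32_t const *pu32Src;
 *     VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src),
 *                                 iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         u32Value = *pu32Src;
 *         rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *     }
 *
 * iemMemMap may return a direct guest page mapping or a bounce buffer; the
 * commit call writes back the latter (for write accesses) and frees the entry.
 */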
5286
5287/**
5288 * Fetches a data byte.
5289 *
5290 * @returns Strict VBox status code.
5291 * @param pIemCpu The IEM per CPU data.
5292 * @param pu8Dst Where to return the byte.
5293 * @param iSegReg The index of the segment register to use for
5294 * this access. The base and limits are checked.
5295 * @param GCPtrMem The address of the guest memory.
5296 */
5297static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5298{
5299 /* The lazy approach for now... */
5300 uint8_t const *pu8Src;
5301 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5302 if (rc == VINF_SUCCESS)
5303 {
5304 *pu8Dst = *pu8Src;
5305 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5306 }
5307 return rc;
5308}
5309
5310
5311/**
5312 * Fetches a data word.
5313 *
5314 * @returns Strict VBox status code.
5315 * @param pIemCpu The IEM per CPU data.
5316 * @param pu16Dst Where to return the word.
5317 * @param iSegReg The index of the segment register to use for
5318 * this access. The base and limits are checked.
5319 * @param GCPtrMem The address of the guest memory.
5320 */
5321static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5322{
5323 /* The lazy approach for now... */
5324 uint16_t const *pu16Src;
5325 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5326 if (rc == VINF_SUCCESS)
5327 {
5328 *pu16Dst = *pu16Src;
5329 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5330 }
5331 return rc;
5332}
5333
5334
5335/**
5336 * Fetches a data dword.
5337 *
5338 * @returns Strict VBox status code.
5339 * @param pIemCpu The IEM per CPU data.
5340 * @param pu32Dst Where to return the dword.
5341 * @param iSegReg The index of the segment register to use for
5342 * this access. The base and limits are checked.
5343 * @param GCPtrMem The address of the guest memory.
5344 */
5345static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5346{
5347 /* The lazy approach for now... */
5348 uint32_t const *pu32Src;
5349 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5350 if (rc == VINF_SUCCESS)
5351 {
5352 *pu32Dst = *pu32Src;
5353 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5354 }
5355 return rc;
5356}
5357
5358
5359#ifdef SOME_UNUSED_FUNCTION
5360/**
5361 * Fetches a data dword and sign extends it to a qword.
5362 *
5363 * @returns Strict VBox status code.
5364 * @param pIemCpu The IEM per CPU data.
5365 * @param pu64Dst Where to return the sign extended value.
5366 * @param iSegReg The index of the segment register to use for
5367 * this access. The base and limits are checked.
5368 * @param GCPtrMem The address of the guest memory.
5369 */
5370static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5371{
5372 /* The lazy approach for now... */
5373 int32_t const *pi32Src;
5374 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5375 if (rc == VINF_SUCCESS)
5376 {
5377 *pu64Dst = *pi32Src;
5378 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5379 }
5380#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5381 else
5382 *pu64Dst = 0;
5383#endif
5384 return rc;
5385}
5386#endif
5387
5388
5389/**
5390 * Fetches a data qword.
5391 *
5392 * @returns Strict VBox status code.
5393 * @param pIemCpu The IEM per CPU data.
5394 * @param pu64Dst Where to return the qword.
5395 * @param iSegReg The index of the segment register to use for
5396 * this access. The base and limits are checked.
5397 * @param GCPtrMem The address of the guest memory.
5398 */
5399static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5400{
5401 /* The lazy approach for now... */
5402 uint64_t const *pu64Src;
5403 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5404 if (rc == VINF_SUCCESS)
5405 {
5406 *pu64Dst = *pu64Src;
5407 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5408 }
5409 return rc;
5410}
5411
5412
5413/**
5414 * Fetches a data tword.
5415 *
5416 * @returns Strict VBox status code.
5417 * @param pIemCpu The IEM per CPU data.
5418 * @param pr80Dst Where to return the tword.
5419 * @param iSegReg The index of the segment register to use for
5420 * this access. The base and limits are checked.
5421 * @param GCPtrMem The address of the guest memory.
5422 */
5423static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5424{
5425 /* The lazy approach for now... */
5426 PCRTFLOAT80U pr80Src;
5427 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5428 if (rc == VINF_SUCCESS)
5429 {
5430 *pr80Dst = *pr80Src;
5431 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5432 }
5433 return rc;
5434}
5435
5436
5437/**
5438 * Fetches a descriptor register (lgdt, lidt).
5439 *
5440 * @returns Strict VBox status code.
5441 * @param pIemCpu The IEM per CPU data.
5442 * @param pcbLimit Where to return the limit.
5443 * @param pGCPtrBase Where to return the base.
5444 * @param iSegReg The index of the segment register to use for
5445 * this access. The base and limits are checked.
5446 * @param GCPtrMem The address of the guest memory.
5447 * @param enmOpSize The effective operand size.
5448 */
5449static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5450 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5451{
5452 uint8_t const *pu8Src;
5453 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5454 (void **)&pu8Src,
5455 enmOpSize == IEMMODE_64BIT
5456 ? 2 + 8
5457 : enmOpSize == IEMMODE_32BIT
5458 ? 2 + 4
5459 : 2 + 3,
5460 iSegReg,
5461 GCPtrMem,
5462 IEM_ACCESS_DATA_R);
5463 if (rcStrict == VINF_SUCCESS)
5464 {
5465 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5466 switch (enmOpSize)
5467 {
5468 case IEMMODE_16BIT:
5469 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5470 break;
5471 case IEMMODE_32BIT:
5472 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5473 break;
5474 case IEMMODE_64BIT:
5475 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5476 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5477 break;
5478
5479 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5480 }
5481 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5482 }
5483 return rcStrict;
5484}
5485
5486
5487
5488/**
5489 * Stores a data byte.
5490 *
5491 * @returns Strict VBox status code.
5492 * @param pIemCpu The IEM per CPU data.
5493 * @param iSegReg The index of the segment register to use for
5494 * this access. The base and limits are checked.
5495 * @param GCPtrMem The address of the guest memory.
5496 * @param u8Value The value to store.
5497 */
5498static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5499{
5500 /* The lazy approach for now... */
5501 uint8_t *pu8Dst;
5502 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5503 if (rc == VINF_SUCCESS)
5504 {
5505 *pu8Dst = u8Value;
5506 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5507 }
5508 return rc;
5509}
5510
5511
5512/**
5513 * Stores a data word.
5514 *
5515 * @returns Strict VBox status code.
5516 * @param pIemCpu The IEM per CPU data.
5517 * @param iSegReg The index of the segment register to use for
5518 * this access. The base and limits are checked.
5519 * @param GCPtrMem The address of the guest memory.
5520 * @param u16Value The value to store.
5521 */
5522static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5523{
5524 /* The lazy approach for now... */
5525 uint16_t *pu16Dst;
5526 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5527 if (rc == VINF_SUCCESS)
5528 {
5529 *pu16Dst = u16Value;
5530 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5531 }
5532 return rc;
5533}
5534
5535
5536/**
5537 * Stores a data dword.
5538 *
5539 * @returns Strict VBox status code.
5540 * @param pIemCpu The IEM per CPU data.
5541 * @param iSegReg The index of the segment register to use for
5542 * this access. The base and limits are checked.
5543 * @param GCPtrMem The address of the guest memory.
5544 * @param u32Value The value to store.
5545 */
5546static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5547{
5548 /* The lazy approach for now... */
5549 uint32_t *pu32Dst;
5550 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5551 if (rc == VINF_SUCCESS)
5552 {
5553 *pu32Dst = u32Value;
5554 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5555 }
5556 return rc;
5557}
5558
5559
5560/**
5561 * Stores a data qword.
5562 *
5563 * @returns Strict VBox status code.
5564 * @param pIemCpu The IEM per CPU data.
5565 * @param iSegReg The index of the segment register to use for
5566 * this access. The base and limits are checked.
5567 * @param GCPtrMem The address of the guest memory.
5568 * @param u64Value The value to store.
5569 */
5570static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5571{
5572 /* The lazy approach for now... */
5573 uint64_t *pu64Dst;
5574 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5575 if (rc == VINF_SUCCESS)
5576 {
5577 *pu64Dst = u64Value;
5578 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5579 }
5580 return rc;
5581}
5582
5583
5584/**
5585 * Stores a descriptor register (sgdt, sidt).
5586 *
5587 * @returns Strict VBox status code.
5588 * @param pIemCpu The IEM per CPU data.
5589 * @param cbLimit The limit.
5590 * @param GCPtrBase The base address.
5591 * @param iSegReg The index of the segment register to use for
5592 * this access. The base and limits are checked.
5593 * @param GCPtrMem The address of the guest memory.
5594 * @param enmOpSize The effective operand size.
5595 */
5596static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
5597 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5598{
5599 uint8_t *pu8Src;
5600 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5601 (void **)&pu8Src,
5602 enmOpSize == IEMMODE_64BIT
5603 ? 2 + 8
5604 : enmOpSize == IEMMODE_32BIT
5605 ? 2 + 4
5606 : 2 + 3,
5607 iSegReg,
5608 GCPtrMem,
5609 IEM_ACCESS_DATA_W);
5610 if (rcStrict == VINF_SUCCESS)
5611 {
5612 pu8Src[0] = RT_BYTE1(cbLimit);
5613 pu8Src[1] = RT_BYTE2(cbLimit);
5614 pu8Src[2] = RT_BYTE1(GCPtrBase);
5615 pu8Src[3] = RT_BYTE2(GCPtrBase);
5616 pu8Src[4] = RT_BYTE3(GCPtrBase);
5617 if (enmOpSize == IEMMODE_16BIT)
5618 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
5619 else
5620 {
5621 pu8Src[5] = RT_BYTE4(GCPtrBase);
5622 if (enmOpSize == IEMMODE_64BIT)
5623 {
5624 pu8Src[6] = RT_BYTE5(GCPtrBase);
5625 pu8Src[7] = RT_BYTE6(GCPtrBase);
5626 pu8Src[8] = RT_BYTE7(GCPtrBase);
5627 pu8Src[9] = RT_BYTE8(GCPtrBase);
5628 }
5629 }
5630 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
5631 }
5632 return rcStrict;
5633}
5634
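/* Layout example (added for exposition): the sgdt/sidt image written above is
 * a 2-byte limit followed by the base, little endian. With a 32-bit operand
 * size, cbLimit=0x03ff and GCPtrBase=0x00123000 the six bytes stored are:
 *     [0]=0xff [1]=0x03                      (limit)
 *     [2]=0x00 [3]=0x30 [4]=0x12 [5]=0x00    (base)
 * With a 16-bit operand size byte [5] is forced to zero (the 286 stored 0xff),
 * and with a 64-bit operand size bytes [2..9] hold the full 8-byte base.
 */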
5635
5636/**
5637 * Pushes a word onto the stack.
5638 *
5639 * @returns Strict VBox status code.
5640 * @param pIemCpu The IEM per CPU data.
5641 * @param u16Value The value to push.
5642 */
5643static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5644{
5645 /* Decrement the stack pointer. */
5646 uint64_t uNewRsp;
5647 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5648 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5649
5650 /* Write the word the lazy way. */
5651 uint16_t *pu16Dst;
5652 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5653 if (rc == VINF_SUCCESS)
5654 {
5655 *pu16Dst = u16Value;
5656 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5657 }
5658
5659 /* Commit the new RSP value unless an access handler made trouble. */
5660 if (rc == VINF_SUCCESS)
5661 pCtx->rsp = uNewRsp;
5662
5663 return rc;
5664}
5665
5666
5667/**
5668 * Pushes a dword onto the stack.
5669 *
5670 * @returns Strict VBox status code.
5671 * @param pIemCpu The IEM per CPU data.
5672 * @param u32Value The value to push.
5673 */
5674static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5675{
5676 /* Decrement the stack pointer. */
5677 uint64_t uNewRsp;
5678 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5679 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5680
5681 /* Write the dword the lazy way. */
5682 uint32_t *pu32Dst;
5683 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5684 if (rc == VINF_SUCCESS)
5685 {
5686 *pu32Dst = u32Value;
5687 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5688 }
5689
5690 /* Commit the new RSP value unless an access handler made trouble. */
5691 if (rc == VINF_SUCCESS)
5692 pCtx->rsp = uNewRsp;
5693
5694 return rc;
5695}
5696
5697
5698/**
5699 * Pushes a qword onto the stack.
5700 *
5701 * @returns Strict VBox status code.
5702 * @param pIemCpu The IEM per CPU data.
5703 * @param u64Value The value to push.
5704 */
5705static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5706{
5707 /* Decrement the stack pointer. */
5708 uint64_t uNewRsp;
5709 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5710 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5711
5712 /* Write the qword the lazy way. */
5713 uint64_t *pu64Dst;
5714 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5715 if (rc == VINF_SUCCESS)
5716 {
5717 *pu64Dst = u64Value;
5718 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5719 }
5720
5721 /* Commit the new RSP value unless an access handler made trouble. */
5722 if (rc == VINF_SUCCESS)
5723 pCtx->rsp = uNewRsp;
5724
5725 return rc;
5726}
5727
5728
5729/**
5730 * Pops a word from the stack.
5731 *
5732 * @returns Strict VBox status code.
5733 * @param pIemCpu The IEM per CPU data.
5734 * @param pu16Value Where to store the popped value.
5735 */
5736static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5737{
5738 /* Increment the stack pointer. */
5739 uint64_t uNewRsp;
5740 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5741 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5742
5743 /* Read the word the lazy way. */
5744 uint16_t const *pu16Src;
5745 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5746 if (rc == VINF_SUCCESS)
5747 {
5748 *pu16Value = *pu16Src;
5749 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5750
5751 /* Commit the new RSP value. */
5752 if (rc == VINF_SUCCESS)
5753 pCtx->rsp = uNewRsp;
5754 }
5755
5756 return rc;
5757}
5758
5759
5760/**
5761 * Pops a dword from the stack.
5762 *
5763 * @returns Strict VBox status code.
5764 * @param pIemCpu The IEM per CPU data.
5765 * @param pu32Value Where to store the popped value.
5766 */
5767static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5768{
5769 /* Increment the stack pointer. */
5770 uint64_t uNewRsp;
5771 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5772 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5773
5774 /* Read the dword the lazy way. */
5775 uint32_t const *pu32Src;
5776 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5777 if (rc == VINF_SUCCESS)
5778 {
5779 *pu32Value = *pu32Src;
5780 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5781
5782 /* Commit the new RSP value. */
5783 if (rc == VINF_SUCCESS)
5784 pCtx->rsp = uNewRsp;
5785 }
5786
5787 return rc;
5788}
5789
5790
5791/**
5792 * Pops a qword from the stack.
5793 *
5794 * @returns Strict VBox status code.
5795 * @param pIemCpu The IEM per CPU data.
5796 * @param pu64Value Where to store the popped value.
5797 */
5798static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5799{
5800 /* Increment the stack pointer. */
5801 uint64_t uNewRsp;
5802 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5803 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5804
5805 /* Read the qword the lazy way. */
5806 uint64_t const *pu64Src;
5807 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5808 if (rc == VINF_SUCCESS)
5809 {
5810 *pu64Value = *pu64Src;
5811 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5812
5813 /* Commit the new RSP value. */
5814 if (rc == VINF_SUCCESS)
5815 pCtx->rsp = uNewRsp;
5816 }
5817
5818 return rc;
5819}
5820
5821
5822/**
5823 * Pushes a word onto the stack, using a temporary stack pointer.
5824 *
5825 * @returns Strict VBox status code.
5826 * @param pIemCpu The IEM per CPU data.
5827 * @param u16Value The value to push.
5828 * @param pTmpRsp Pointer to the temporary stack pointer.
5829 */
5830static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5831{
5832 /* Decrement the stack pointer. */
5833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5834 RTUINT64U NewRsp = *pTmpRsp;
5835 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5836
5837 /* Write the word the lazy way. */
5838 uint16_t *pu16Dst;
5839 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5840 if (rc == VINF_SUCCESS)
5841 {
5842 *pu16Dst = u16Value;
5843 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5844 }
5845
5846 /* Commit the new RSP value unless an access handler made trouble. */
5847 if (rc == VINF_SUCCESS)
5848 *pTmpRsp = NewRsp;
5849
5850 return rc;
5851}
5852
5853
5854/**
5855 * Pushes a dword onto the stack, using a temporary stack pointer.
5856 *
5857 * @returns Strict VBox status code.
5858 * @param pIemCpu The IEM per CPU data.
5859 * @param u32Value The value to push.
5860 * @param pTmpRsp Pointer to the temporary stack pointer.
5861 */
5862static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5863{
5864 /* Decrement the stack pointer. */
5865 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5866 RTUINT64U NewRsp = *pTmpRsp;
5867 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5868
5869 /* Write the dword the lazy way. */
5870 uint32_t *pu32Dst;
5871 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5872 if (rc == VINF_SUCCESS)
5873 {
5874 *pu32Dst = u32Value;
5875 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5876 }
5877
5878 /* Commit the new RSP value unless an access handler made trouble. */
5879 if (rc == VINF_SUCCESS)
5880 *pTmpRsp = NewRsp;
5881
5882 return rc;
5883}
5884
5885
5886/**
5887 * Pushes a qword onto the stack, using a temporary stack pointer.
5888 *
5889 * @returns Strict VBox status code.
5890 * @param pIemCpu The IEM per CPU data.
5891 * @param u64Value The value to push.
5892 * @param pTmpRsp Pointer to the temporary stack pointer.
5893 */
5894static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5895{
5896 /* Decrement the stack pointer. */
5897 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5898 RTUINT64U NewRsp = *pTmpRsp;
5899 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5900
5901 /* Write the qword the lazy way. */
5902 uint64_t *pu64Dst;
5903 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5904 if (rc == VINF_SUCCESS)
5905 {
5906 *pu64Dst = u64Value;
5907 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5908 }
5909
5910 /* Commit the new RSP value unless an access handler made trouble. */
5911 if (rc == VINF_SUCCESS)
5912 *pTmpRsp = NewRsp;
5913
5914 return rc;
5915}
5916
5917
5918/**
5919 * Pops a word from the stack, using a temporary stack pointer.
5920 *
5921 * @returns Strict VBox status code.
5922 * @param pIemCpu The IEM per CPU data.
5923 * @param pu16Value Where to store the popped value.
5924 * @param pTmpRsp Pointer to the temporary stack pointer.
5925 */
5926static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5927{
5928 /* Increment the stack pointer. */
5929 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5930 RTUINT64U NewRsp = *pTmpRsp;
5931 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5932
5933 /* Read the word the lazy way. */
5934 uint16_t const *pu16Src;
5935 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5936 if (rc == VINF_SUCCESS)
5937 {
5938 *pu16Value = *pu16Src;
5939 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5940
5941 /* Commit the new RSP value. */
5942 if (rc == VINF_SUCCESS)
5943 *pTmpRsp = NewRsp;
5944 }
5945
5946 return rc;
5947}
5948
5949
5950/**
5951 * Pops a dword from the stack, using a temporary stack pointer.
5952 *
5953 * @returns Strict VBox status code.
5954 * @param pIemCpu The IEM per CPU data.
5955 * @param pu32Value Where to store the popped value.
5956 * @param pTmpRsp Pointer to the temporary stack pointer.
5957 */
5958static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5959{
5960 /* Increment the stack pointer. */
5961 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5962 RTUINT64U NewRsp = *pTmpRsp;
5963 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5964
5965 /* Read the dword the lazy way. */
5966 uint32_t const *pu32Src;
5967 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5968 if (rc == VINF_SUCCESS)
5969 {
5970 *pu32Value = *pu32Src;
5971 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5972
5973 /* Commit the new RSP value. */
5974 if (rc == VINF_SUCCESS)
5975 *pTmpRsp = NewRsp;
5976 }
5977
5978 return rc;
5979}
5980
5981
5982/**
5983 * Pops a qword from the stack, using a temporary stack pointer.
5984 *
5985 * @returns Strict VBox status code.
5986 * @param pIemCpu The IEM per CPU data.
5987 * @param pu64Value Where to store the popped value.
5988 * @param pTmpRsp Pointer to the temporary stack pointer.
5989 */
5990static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5991{
5992 /* Increment the stack pointer. */
5993 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5994 RTUINT64U NewRsp = *pTmpRsp;
5995 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5996
5997 /* Read the qword the lazy way. */
5998 uint64_t const *pu64Src;
5999 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6000 if (rcStrict == VINF_SUCCESS)
6001 {
6002 *pu64Value = *pu64Src;
6003 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6004
6005 /* Commit the new RSP value. */
6006 if (rcStrict == VINF_SUCCESS)
6007 *pTmpRsp = NewRsp;
6008 }
6009
6010 return rcStrict;
6011}
6012
6013
6014/**
6015 * Begin a special stack push (used by interrupts, exceptions and such).
6016 *
6017 * This will raise \#SS or \#PF if appropriate.
6018 *
6019 * @returns Strict VBox status code.
6020 * @param pIemCpu The IEM per CPU data.
6021 * @param cbMem The number of bytes to push onto the stack.
6022 * @param ppvMem Where to return the pointer to the stack memory.
6023 * As with the other memory functions this could be
6024 * direct access or bounce buffered access, so
6025 * don't commit the register state until the commit call
6026 * succeeds.
6027 * @param puNewRsp Where to return the new RSP value. This must be
6028 * passed unchanged to
6029 * iemMemStackPushCommitSpecial().
6030 */
6031static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6032{
6033 Assert(cbMem < UINT8_MAX);
6034 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6035 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
6036 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6037}
6038
6039
6040/**
6041 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6042 *
6043 * This will update the rSP.
6044 *
6045 * @returns Strict VBox status code.
6046 * @param pIemCpu The IEM per CPU data.
6047 * @param pvMem The pointer returned by
6048 * iemMemStackPushBeginSpecial().
6049 * @param uNewRsp The new RSP value returned by
6050 * iemMemStackPushBeginSpecial().
6051 */
6052static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6053{
6054 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6055 if (rcStrict == VINF_SUCCESS)
6056 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6057 return rcStrict;
6058}
6059
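/* Usage sketch (added for exposition, variable names are illustrative only):
 * exception/interrupt dispatch uses the begin/commit pair so that RSP is only
 * updated once the whole frame has been written, roughly:
 *
 *     uint64_t  uNewRsp;
 *     uint32_t *pu32Frame;
 *     rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 12, (void **)&pu32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu32Frame[2] = uEfl;    // eflags at the highest address
 *     pu32Frame[1] = uCs;
 *     pu32Frame[0] = uEip;
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
 */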
6060
6061/**
6062 * Begin a special stack pop (used by iret, retf and such).
6063 *
6064 * This will raise \#SS or \#PF if appropriate.
6065 *
6066 * @returns Strict VBox status code.
6067 * @param pIemCpu The IEM per CPU data.
6068 * @param cbMem The number of bytes to pop from the stack.
6069 * @param ppvMem Where to return the pointer to the stack memory.
6070 * @param puNewRsp Where to return the new RSP value. This must be
6071 * passed unchanged to
6072 * iemMemStackPopCommitSpecial() or applied
6073 * manually if iemMemStackPopDoneSpecial() is used.
6074 */
6075static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6076{
6077 Assert(cbMem < UINT8_MAX);
6078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6079 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
6080 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6081}
6082
6083
6084/**
6085 * Continue a special stack pop (used by iret and retf).
6086 *
6087 * This will raise \#SS or \#PF if appropriate.
6088 *
6089 * @returns Strict VBox status code.
6090 * @param pIemCpu The IEM per CPU data.
6091 * @param cbMem The number of bytes to pop from the stack.
6092 * @param ppvMem Where to return the pointer to the stack memory.
6093 * @param puNewRsp Where to return the new RSP value. This must be
6094 * passed unchanged to
6095 * iemMemStackPopCommitSpecial() or applied
6096 * manually if iemMemStackPopDoneSpecial() is used.
6097 */
6098static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6099{
6100 Assert(cbMem < UINT8_MAX);
6101 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6102 RTUINT64U NewRsp;
6103 NewRsp.u = *puNewRsp;
6104 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
6105 *puNewRsp = NewRsp.u;
6106 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6107}
6108
6109
6110/**
6111 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6112 *
6113 * This will update the rSP.
6114 *
6115 * @returns Strict VBox status code.
6116 * @param pIemCpu The IEM per CPU data.
6117 * @param pvMem The pointer returned by
6118 * iemMemStackPopBeginSpecial().
6119 * @param uNewRsp The new RSP value returned by
6120 * iemMemStackPopBeginSpecial().
6121 */
6122static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6123{
6124 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6125 if (rcStrict == VINF_SUCCESS)
6126 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6127 return rcStrict;
6128}
6129
6130
6131/**
6132 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6133 * iemMemStackPopContinueSpecial).
6134 *
6135 * The caller will manually commit the rSP.
6136 *
6137 * @returns Strict VBox status code.
6138 * @param pIemCpu The IEM per CPU data.
6139 * @param pvMem The pointer returned by
6140 * iemMemStackPopBeginSpecial() or
6141 * iemMemStackPopContinueSpecial().
6142 */
6143static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6144{
6145 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6146}
6147
6148
6149/**
6150 * Fetches a system table dword.
6151 *
6152 * @returns Strict VBox status code.
6153 * @param pIemCpu The IEM per CPU data.
6154 * @param pu32Dst Where to return the dword.
6155 * @param iSegReg The index of the segment register to use for
6156 * this access. The base and limits are checked.
6157 * @param GCPtrMem The address of the guest memory.
6158 */
6159static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6160{
6161 /* The lazy approach for now... */
6162 uint32_t const *pu32Src;
6163 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6164 if (rc == VINF_SUCCESS)
6165 {
6166 *pu32Dst = *pu32Src;
6167 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6168 }
6169 return rc;
6170}
6171
6172
6173/**
6174 * Fetches a system table qword.
6175 *
6176 * @returns Strict VBox status code.
6177 * @param pIemCpu The IEM per CPU data.
6178 * @param pu64Dst Where to return the qword.
6179 * @param iSegReg The index of the segment register to use for
6180 * this access. The base and limits are checked.
6181 * @param GCPtrMem The address of the guest memory.
6182 */
6183static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6184{
6185 /* The lazy approach for now... */
6186 uint64_t const *pu64Src;
6187 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6188 if (rc == VINF_SUCCESS)
6189 {
6190 *pu64Dst = *pu64Src;
6191 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6192 }
6193 return rc;
6194}
6195
6196
6197/**
6198 * Fetches a descriptor table entry.
6199 *
6200 * @returns Strict VBox status code.
6201 * @param pIemCpu The IEM per CPU.
6202 * @param pDesc Where to return the descriptor table entry.
6203 * @param uSel The selector which table entry to fetch.
6204 */
6205static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6206{
6207 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6208
6209 /** @todo did the 286 require all 8 bytes to be accessible? */
6210 /*
6211 * Get the selector table base and check bounds.
6212 */
6213 RTGCPTR GCPtrBase;
6214 if (uSel & X86_SEL_LDT)
6215 {
6216 if ( !pCtx->ldtr.Attr.n.u1Present
6217 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6218 {
6219 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6220 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6221 /** @todo is this the right exception? */
6222 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6223 }
6224
6225 Assert(pCtx->ldtr.Attr.n.u1Present);
6226 GCPtrBase = pCtx->ldtr.u64Base;
6227 }
6228 else
6229 {
6230 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6231 {
6232 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6233 /** @todo is this the right exception? */
6234 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6235 }
6236 GCPtrBase = pCtx->gdtr.pGdt;
6237 }
6238
6239 /*
6240 * Read the legacy descriptor and maybe the long mode extensions if
6241 * required.
6242 */
6243 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6244 if (rcStrict == VINF_SUCCESS)
6245 {
6246 if ( !IEM_IS_LONG_MODE(pIemCpu)
6247 || pDesc->Legacy.Gen.u1DescType)
6248 pDesc->Long.au64[1] = 0;
6249 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6250 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6251 else
6252 {
6253 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6254 /** @todo is this the right exception? */
6255 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6256 }
6257 }
6258 return rcStrict;
6259}
6260
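/* Worked example (added for exposition): for uSel=0x002b the TI bit
 * (X86_SEL_LDT) is clear, so the GDT is used; the RPL is 3 and the index is 5,
 * so the 8-byte descriptor is read from gdtr.pGdt + (0x002b & X86_SEL_MASK) =
 * pGdt + 0x28. In long mode a system descriptor (u1DescType=0) occupies 16
 * bytes, which is why the second qword is fetched from the next 8 bytes when
 * it fits within the table limit.
 */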
6261
6262/**
6263 * Fakes a long mode stack selector for SS = 0.
6264 *
6265 * @param pDescSs Where to return the fake stack descriptor.
6266 * @param uDpl The DPL we want.
6267 */
6268static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6269{
6270 pDescSs->Long.au64[0] = 0;
6271 pDescSs->Long.au64[1] = 0;
6272 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6273 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6274 pDescSs->Long.Gen.u2Dpl = uDpl;
6275 pDescSs->Long.Gen.u1Present = 1;
6276 pDescSs->Long.Gen.u1Long = 1;
6277}
6278
6279
6280/**
6281 * Marks the selector descriptor as accessed (only non-system descriptors).
6282 *
6283 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6284 * will therefore skip the limit checks.
6285 *
6286 * @returns Strict VBox status code.
6287 * @param pIemCpu The IEM per CPU.
6288 * @param uSel The selector.
6289 */
6290static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6291{
6292 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6293
6294 /*
6295 * Get the selector table base and calculate the entry address.
6296 */
6297 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6298 ? pCtx->ldtr.u64Base
6299 : pCtx->gdtr.pGdt;
6300 GCPtr += uSel & X86_SEL_MASK;
6301
6302 /*
6303 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6304 * ugly stuff to avoid this. This will make sure it's an atomic access
6305 * as well as more or less remove any question about 8-bit or 32-bit accesses.
6306 */
6307 VBOXSTRICTRC rcStrict;
6308 uint32_t volatile *pu32;
6309 if ((GCPtr & 3) == 0)
6310 {
6311 /* The normal case, map the 32-bit bits around the accessed bit (40). */
6312 GCPtr += 2 + 2;
6313 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6314 if (rcStrict != VINF_SUCCESS)
6315 return rcStrict;
6316 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6317 }
6318 else
6319 {
6320 /* The misaligned GDT/LDT case, map the whole thing. */
6321 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6322 if (rcStrict != VINF_SUCCESS)
6323 return rcStrict;
6324 switch ((uintptr_t)pu32 & 3)
6325 {
6326 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6327 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6328 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6329 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6330 }
6331 }
6332
6333 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6334}
6335
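/* Note (added for exposition): the accessed flag is bit 0 of the type field,
 * i.e. byte 5 / bit 40 of the 8-byte descriptor. The aligned case above maps
 * the dword covering bytes 4..7 and sets bit 8 of it (byte 5, bit 0). The
 * misaligned case maps the whole descriptor and nudges the byte pointer so
 * that ASMAtomicBitSet gets a 4-byte aligned base again, adjusting the bit
 * index by the same amount (e.g. base+3 and bit 40-24=16 when the mapping is
 * off by one byte); either way bit 40 of the descriptor ends up set.
 */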
6336/** @} */
6337
6338
6339/*
6340 * Include the C/C++ implementation of instructions.
6341 */
6342#include "IEMAllCImpl.cpp.h"
6343
6344
6345
6346/** @name "Microcode" macros.
6347 *
6348 * The idea is that we should be able to use the same code both to interpret
6349 * instructions and, eventually, to recompile them. Thus this obfuscation.
6350 *
6351 * @{
6352 */
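/* Illustrative sketch (added for exposition, not an actual decoder function
 * from this file): an instruction body written with these macros expands to
 * plain C, so a 16-bit register-to-register move could look roughly like:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *     IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * Redefining the macros would let the very same sequence drive a recompiler,
 * which is the point of the obfuscation mentioned above.
 */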
6353#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6354#define IEM_MC_END() }
6355#define IEM_MC_PAUSE() do {} while (0)
6356#define IEM_MC_CONTINUE() do {} while (0)
6357
6358/** Internal macro. */
6359#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6360 do \
6361 { \
6362 VBOXSTRICTRC rcStrict2 = a_Expr; \
6363 if (rcStrict2 != VINF_SUCCESS) \
6364 return rcStrict2; \
6365 } while (0)
6366
6367#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6368#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6369#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6370#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6371#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6372#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6373#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6374
6375#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6376#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6377 do { \
6378 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6379 return iemRaiseDeviceNotAvailable(pIemCpu); \
6380 } while (0)
6381#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6382 do { \
6383 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6384 return iemRaiseMathFault(pIemCpu); \
6385 } while (0)
6386#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6387 do { \
6388 if (pIemCpu->uCpl != 0) \
6389 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6390 } while (0)
6391
6392
6393#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6394#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6395#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6396#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6397#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6398#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6399#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6400 uint32_t a_Name; \
6401 uint32_t *a_pName = &a_Name
6402#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6403 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6404
6405#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6406#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6407
6408#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6409#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6410#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6411#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6412#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6413#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6414#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6415#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6416#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6417#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6418#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6419#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6420#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6421#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6422#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6423#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6424#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6425#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6426#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6427#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6428#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6429#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6430#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6431#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6432#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6433#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6434#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6435#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6436#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6437/** @note Not for IOPL or IF testing or modification. */
6438#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6439#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6440#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6441#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6442
6443#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6444#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6445#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6446#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6447#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6448#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6449#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6450#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6451#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6452#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6453#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6454 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6455
6456#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6457#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6458/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6459 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6460#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6461#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6462/** @note Not for IOPL or IF testing or modification. */
6463#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6464
6465#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6466#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6467#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6468 do { \
6469 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6470 *pu32Reg += (a_u32Value); \
6471 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6472 } while (0)
6473#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6474
6475#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6476#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6477#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6478 do { \
6479 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6480 *pu32Reg -= (a_u32Value); \
6481 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6482 } while (0)
6483#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6484
6485#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6486#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6487#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6488#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6489#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6490#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6491#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6492
6493#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6494#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6495#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6496#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6497
6498#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6499#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6500#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6501
6502#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6503#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6504
6505#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6506#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6507#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6508
6509#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6510#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6511#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6512
6513#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6514
6515#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6516
6517#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6518#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6519#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6520 do { \
6521 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6522 *pu32Reg &= (a_u32Value); \
6523 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6524 } while (0)
6525#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6526
6527#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6528#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6529#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6530 do { \
6531 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6532 *pu32Reg |= (a_u32Value); \
6533 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
6534 } while (0)
6535#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6536
6537
6538/** @note Not for IOPL or IF modification. */
6539#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6540/** @note Not for IOPL or IF modification. */
6541#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6542/** @note Not for IOPL or IF modification. */
6543#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6544
6545#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6546
6547
6548#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6549 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6550#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6551 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6552#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6554
6555#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6557#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6559#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6561
6562#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6563 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6564#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6566#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6568
6569#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6570 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6571
6572#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6574#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6576
6577#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6578 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6579#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6580 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6581#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6582 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6583
6584
6585#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6586 do { \
6587 uint8_t u8Tmp; \
6588 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6589 (a_u16Dst) = u8Tmp; \
6590 } while (0)
6591#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6592 do { \
6593 uint8_t u8Tmp; \
6594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6595 (a_u32Dst) = u8Tmp; \
6596 } while (0)
6597#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6598 do { \
6599 uint8_t u8Tmp; \
6600 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6601 (a_u64Dst) = u8Tmp; \
6602 } while (0)
6603#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6604 do { \
6605 uint16_t u16Tmp; \
6606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6607 (a_u32Dst) = u16Tmp; \
6608 } while (0)
6609#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6610 do { \
6611 uint16_t u16Tmp; \
6612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6613 (a_u64Dst) = u16Tmp; \
6614 } while (0)
6615#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6616 do { \
6617 uint32_t u32Tmp; \
6618 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6619 (a_u64Dst) = u32Tmp; \
6620 } while (0)
6621
6622#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6623 do { \
6624 uint8_t u8Tmp; \
6625 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6626 (a_u16Dst) = (int8_t)u8Tmp; \
6627 } while (0)
6628#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6629 do { \
6630 uint8_t u8Tmp; \
6631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6632 (a_u32Dst) = (int8_t)u8Tmp; \
6633 } while (0)
6634#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6635 do { \
6636 uint8_t u8Tmp; \
6637 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6638 (a_u64Dst) = (int8_t)u8Tmp; \
6639 } while (0)
6640#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6641 do { \
6642 uint16_t u16Tmp; \
6643 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6644 (a_u32Dst) = (int16_t)u16Tmp; \
6645 } while (0)
6646#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6647 do { \
6648 uint16_t u16Tmp; \
6649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6650 (a_u64Dst) = (int16_t)u16Tmp; \
6651 } while (0)
6652#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6653 do { \
6654 uint32_t u32Tmp; \
6655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6656 (a_u64Dst) = (int32_t)u32Tmp; \
6657 } while (0)
6658
6659#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6660 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6661#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6662 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6663#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6664 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6665#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6666 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6667
6668#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6669 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6670#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6671 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6672#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6673 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6674#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6675 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6676
6677#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6678#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6679#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6680#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6681#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6682#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6683#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6684 do { \
6685 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6686 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6687 } while (0)
6688
6689
6690#define IEM_MC_PUSH_U16(a_u16Value) \
6691 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6692#define IEM_MC_PUSH_U32(a_u32Value) \
6693 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6694#define IEM_MC_PUSH_U64(a_u64Value) \
6695 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6696
6697#define IEM_MC_POP_U16(a_pu16Value) \
6698 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6699#define IEM_MC_POP_U32(a_pu32Value) \
6700 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6701#define IEM_MC_POP_U64(a_pu64Value) \
6702 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
6703
6704/** Maps guest memory for direct or bounce buffered access.
6705 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6706 * @remarks May return.
6707 */
6708#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6709 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6710
6711/** Maps guest memory for direct or bounce buffered access.
6712 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6713 * @remarks May return.
6714 */
6715#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6716 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6717
6718/** Commits the memory and unmaps the guest memory.
6719 * @remarks May return.
6720 */
6721#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6722 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
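
/* Illustrative sketch only (not lifted from the opcode decoders): the typical
 * map / modify / commit sequence for a read-modify-write memory operand. Only
 * IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP come from right here; the
 * IEM_MC_ARG set-up macro is assumed from the earlier MC macro definitions,
 * IEM_ACCESS_DATA_RW is assumed from the internal headers, and GCPtrEffDst as
 * well as iemAImpl_SomeHypotheticalWorker are made up for the example:
 *
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_SomeHypotheticalWorker, pu16Dst);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */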
6723
6724/** Commits the memory and unmaps the guest memory, unless the FPU status word
6725 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
6726 * that would cause FLD not to store.
6727 *
6728 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6729 * store, while \#P will not.
6730 *
6731 * @remarks May in theory return - for now.
6732 */
6733#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6734 do { \
6735 if ( !(a_u16FSW & X86_FSW_ES) \
6736 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6737 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6738 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6739 } while (0)
6740
6741/** Calculate the effective address from R/M. */
6742#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6743 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6744
6745#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6746#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6747#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6748#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6749#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6750
6751/**
6752 * Defers the rest of the instruction emulation to a C implementation routine
6753 * and returns, only taking the standard parameters.
6754 *
6755 * @param a_pfnCImpl The pointer to the C routine.
6756 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6757 */
6758#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6759
6760/**
6761 * Defers the rest of instruction emulation to a C implementation routine and
6762 * returns, taking one argument in addition to the standard ones.
6763 *
6764 * @param a_pfnCImpl The pointer to the C routine.
6765 * @param a0 The argument.
6766 */
6767#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6768
6769/**
6770 * Defers the rest of the instruction emulation to a C implementation routine
6771 * and returns, taking two arguments in addition to the standard ones.
6772 *
6773 * @param a_pfnCImpl The pointer to the C routine.
6774 * @param a0 The first extra argument.
6775 * @param a1 The second extra argument.
6776 */
6777#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6778
6779/**
6780 * Defers the rest of the instruction emulation to a C implementation routine
6781 * and returns, taking three arguments in addition to the standard ones.
6782 *
6783 * @param a_pfnCImpl The pointer to the C routine.
6784 * @param a0 The first extra argument.
6785 * @param a1 The second extra argument.
6786 * @param a2 The third extra argument.
6787 */
6788#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6789
6790/**
6791 * Defers the rest of the instruction emulation to a C implementation routine
6792 * and returns, taking five arguments in addition to the standard ones.
6793 *
6794 * @param a_pfnCImpl The pointer to the C routine.
6795 * @param a0 The first extra argument.
6796 * @param a1 The second extra argument.
6797 * @param a2 The third extra argument.
6798 * @param a3 The fourth extra argument.
6799 * @param a4 The fifth extra argument.
6800 */
6801#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
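
/* Illustrative sketch only: how an opcode function typically hands the rest of
 * an instruction over to a C implementation routine. IEM_MC_BEGIN, IEM_MC_ARG,
 * IEM_MC_ASSIGN and IEM_MC_END are assumed from the MC macro definitions
 * earlier in the file, bRm is the ModR/M byte the decoder already fetched, and
 * iemCImpl_SomeHypotheticalWorker is made up:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint8_t, iEffSeg,     0);
 *      IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 1);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
 *      IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeHypotheticalWorker, iEffSeg, GCPtrEffDst);
 *      IEM_MC_END();
 */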
6802
6803/**
6804 * Defers the entire instruction emulation to a C implementation routine and
6805 * returns, only taking the standard parameters.
6806 *
6807 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6808 *
6809 * @param a_pfnCImpl The pointer to the C routine.
6810 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6811 */
6812#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6813
6814/**
6815 * Defers the entire instruction emulation to a C implementation routine and
6816 * returns, taking one argument in addition to the standard ones.
6817 *
6818 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6819 *
6820 * @param a_pfnCImpl The pointer to the C routine.
6821 * @param a0 The argument.
6822 */
6823#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6824
6825/**
6826 * Defers the entire instruction emulation to a C implementation routine and
6827 * returns, taking two arguments in addition to the standard ones.
6828 *
6829 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6830 *
6831 * @param a_pfnCImpl The pointer to the C routine.
6832 * @param a0 The first extra argument.
6833 * @param a1 The second extra argument.
6834 */
6835#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6836
6837/**
6838 * Defers the entire instruction emulation to a C implementation routine and
6839 * returns, taking three arguments in addition to the standard ones.
6840 *
6841 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6842 *
6843 * @param a_pfnCImpl The pointer to the C routine.
6844 * @param a0 The first extra argument.
6845 * @param a1 The second extra argument.
6846 * @param a2 The third extra argument.
6847 */
6848#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
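
/* Illustrative sketch only: unlike the IEM_MC_CALL_CIMPL_* macros above, the
 * IEM_MC_DEFER_TO_CIMPL_* variants form the entire body of an opcode function,
 * e.g. (the worker name is hypothetical):
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeHypotheticalWorker);
 */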
6849
6850/**
6851 * Calls a FPU assembly implementation taking one visible argument.
6852 *
6853 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6854 * @param a0 The first extra argument.
6855 */
6856#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6857 do { \
6858 iemFpuPrepareUsage(pIemCpu); \
6859 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6860 } while (0)
6861
6862/**
6863 * Calls a FPU assembly implementation taking two visible arguments.
6864 *
6865 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6866 * @param a0 The first extra argument.
6867 * @param a1 The second extra argument.
6868 */
6869#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6870 do { \
6871 iemFpuPrepareUsage(pIemCpu); \
6872 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6873 } while (0)
6874
6875/**
6876 * Calls a FPU assembly implementation taking three visible arguments.
6877 *
6878 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6879 * @param a0 The first extra argument.
6880 * @param a1 The second extra argument.
6881 * @param a2 The third extra argument.
6882 */
6883#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6884 do { \
6885 iemFpuPrepareUsage(pIemCpu); \
6886 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6887 } while (0)
6888
6889#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6890 do { \
6891 (a_FpuData).FSW = (a_FSW); \
6892 (a_FpuData).r80Result = *(a_pr80Value); \
6893 } while (0)
6894
6895/** Pushes FPU result onto the stack. */
6896#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6897 iemFpuPushResult(pIemCpu, &a_FpuData)
6898/** Pushes FPU result onto the stack and sets the FPUDP. */
6899#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6900 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6901
6902/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
6903#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6904 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6905
6906/** Stores FPU result in a stack register. */
6907#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6908 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6909/** Stores FPU result in a stack register and pops the stack. */
6910#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6911 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6912/** Stores FPU result in a stack register and sets the FPUDP. */
6913#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6914 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6915/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6916 * stack. */
6917#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6918 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6919
6920/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6921#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6922 iemFpuUpdateOpcodeAndIp(pIemCpu)
6923/** Free a stack register (for FFREE and FFREEP). */
6924#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6925 iemFpuStackFree(pIemCpu, a_iStReg)
6926/** Increment the FPU stack pointer. */
6927#define IEM_MC_FPU_STACK_INC_TOP() \
6928 iemFpuStackIncTop(pIemCpu)
6929/** Decrement the FPU stack pointer. */
6930#define IEM_MC_FPU_STACK_DEC_TOP() \
6931 iemFpuStackDecTop(pIemCpu)
6932
6933/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6934#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6935 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6936/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6937#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6938 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6939/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6940#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6941 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6942/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6943#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6944 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6945/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6946 * stack. */
6947#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6948 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6949/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6950#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6951 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
6952
6953/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6954#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6955 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6956/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6957 * stack. */
6958#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6959 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6960/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6961 * FPUDS. */
6962#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6963 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6964/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6965 * FPUDS. Pops stack. */
6966#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6967 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6968/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6969 * stack twice. */
6970#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6971 iemFpuStackUnderflowThenPopPop(pIemCpu)
6972/** Raises a FPU stack underflow exception for an instruction pushing a result
6973 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6974#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6975 iemFpuStackPushUnderflow(pIemCpu)
6976/** Raises a FPU stack underflow exception for an instruction pushing a result
6977 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6978#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6979 iemFpuStackPushUnderflowTwo(pIemCpu)
6980
6981/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6982 * FPUIP, FPUCS and FOP. */
6983#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6984 iemFpuStackPushOverflow(pIemCpu)
6985/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6986 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6987#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6988 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6989/** Indicates that we (might) have modified the FPU state. */
6990#define IEM_MC_USED_FPU() \
6991 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
6992
6993/** @note Not for IOPL or IF testing. */
6994#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6995/** @note Not for IOPL or IF testing. */
6996#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6997/** @note Not for IOPL or IF testing. */
6998#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6999/** @note Not for IOPL or IF testing. */
7000#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
7001/** @note Not for IOPL or IF testing. */
7002#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
7003 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7004 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7005/** @note Not for IOPL or IF testing. */
7006#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7007 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7008 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7009/** @note Not for IOPL or IF testing. */
7010#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7011 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7012 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7013 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7014/** @note Not for IOPL or IF testing. */
7015#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7016 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7017 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7018 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7019#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7020#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7021#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7022/** @note Not for IOPL or IF testing. */
7023#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7024 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7025 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7026/** @note Not for IOPL or IF testing. */
7027#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7028 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7029 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7030/** @note Not for IOPL or IF testing. */
7031#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7032 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7033 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7034/** @note Not for IOPL or IF testing. */
7035#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7036 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7037 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7038/** @note Not for IOPL or IF testing. */
7039#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7040 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7041 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7042/** @note Not for IOPL or IF testing. */
7043#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7044 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7045 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7046#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7047#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7048#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7049 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7050#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7051 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7052#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7053 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7054#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7055 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7056#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7057 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7058#define IEM_MC_IF_FCW_IM() \
7059 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7060
7061#define IEM_MC_ELSE() } else {
7062#define IEM_MC_ENDIF() } do {} while (0)
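
/* Illustrative sketch only: the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros
 * expand to plain if/else braces, so a two-operand FPU instruction body would
 * typically read as below. The IEM_MC_BEGIN/LOCAL/ARG* set-up macros and
 * IEM_MC_ADVANCE_RIP are assumed from the MC definitions earlier in the file,
 * and pfnHypotheticalAImpl is made up:
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnHypotheticalAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_USED_FPU();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */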
7063
7064/** @} */
7065
7066
7067/** @name Opcode Debug Helpers.
7068 * @{
7069 */
7070#ifdef DEBUG
7071# define IEMOP_MNEMONIC(a_szMnemonic) \
7072 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7073 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7074# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7075 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7076 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7077#else
7078# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7079# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7080#endif
7081
7082/** @} */
7083
7084
7085/** @name Opcode Helpers.
7086 * @{
7087 */
7088
7089/** The instruction raises an \#UD in real and V8086 mode. */
7090#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7091 do \
7092 { \
7093 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7094 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7095 } while (0)
7096
7097/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
7098 * lock prefixed.
7099 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7100#define IEMOP_HLP_NO_LOCK_PREFIX() \
7101 do \
7102 { \
7103 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7104 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7105 } while (0)
7106
7107/** The instruction is not available in 64-bit mode; raises \#UD if we're in
7108 * 64-bit mode. */
7109#define IEMOP_HLP_NO_64BIT() \
7110 do \
7111 { \
7112 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7113 return IEMOP_RAISE_INVALID_OPCODE(); \
7114 } while (0)
7115
7116/** The instruction defaults to 64-bit operand size if 64-bit mode. */
7117#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7118 do \
7119 { \
7120 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7121 iemRecalEffOpSize64Default(pIemCpu); \
7122 } while (0)
7123
7124/** The instruction has 64-bit operand size if 64-bit mode. */
7125#define IEMOP_HLP_64BIT_OP_SIZE() \
7126 do \
7127 { \
7128 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7129 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7130 } while (0)
7131
7132/**
7133 * Done decoding.
7134 */
7135#define IEMOP_HLP_DONE_DECODING() \
7136 do \
7137 { \
7138 /*nothing for now, maybe later... */ \
7139 } while (0)
7140
7141/**
7142 * Done decoding, raise \#UD exception if lock prefix present.
7143 */
7144#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7145 do \
7146 { \
7147 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7148 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7149 } while (0)
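
/* Illustrative sketch only: how these helpers are typically combined at the top
 * of a decoder function. The mnemonic and the rest of the body are placeholders;
 * IEM_OPCODE_GET_NEXT_U8 is the opcode fetch macro used elsewhere in this file:
 *
 *      IEMOP_MNEMONIC("xyz Ev,Gv");
 *      IEMOP_HLP_NO_64BIT();
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 */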
7150
7151
7152/**
7153 * Calculates the effective address of a ModR/M memory operand.
7154 *
7155 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7156 *
7157 * @return Strict VBox status code.
7158 * @param pIemCpu The IEM per CPU data.
7159 * @param bRm The ModRM byte.
7160 * @param pGCPtrEff Where to return the effective address.
7161 */
7162static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
7163{
7164 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7165 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7166#define SET_SS_DEF() \
7167 do \
7168 { \
7169 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7170 pIemCpu->iEffSeg = X86_SREG_SS; \
7171 } while (0)
7172
7173/** @todo Check the effective address size crap! */
7174 switch (pIemCpu->enmEffAddrMode)
7175 {
7176 case IEMMODE_16BIT:
7177 {
7178 uint16_t u16EffAddr;
7179
7180 /* Handle the disp16 form with no registers first. */
7181 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7182 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7183 else
7184 {
7185 /* Get the displacement. */
7186 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7187 {
7188 case 0: u16EffAddr = 0; break;
7189 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7190 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7191 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7192 }
7193
7194 /* Add the base and index registers to the disp. */
7195 switch (bRm & X86_MODRM_RM_MASK)
7196 {
7197 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7198 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7199 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7200 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7201 case 4: u16EffAddr += pCtx->si; break;
7202 case 5: u16EffAddr += pCtx->di; break;
7203 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7204 case 7: u16EffAddr += pCtx->bx; break;
7205 }
7206 }
7207
7208 *pGCPtrEff = u16EffAddr;
7209 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
7210 return VINF_SUCCESS;
7211 }
7212
7213 case IEMMODE_32BIT:
7214 {
7215 uint32_t u32EffAddr;
7216
7217 /* Handle the disp32 form with no registers first. */
7218 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7219 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7220 else
7221 {
7222 /* Get the register (or SIB) value. */
7223 switch ((bRm & X86_MODRM_RM_MASK))
7224 {
7225 case 0: u32EffAddr = pCtx->eax; break;
7226 case 1: u32EffAddr = pCtx->ecx; break;
7227 case 2: u32EffAddr = pCtx->edx; break;
7228 case 3: u32EffAddr = pCtx->ebx; break;
7229 case 4: /* SIB */
7230 {
7231 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7232
7233 /* Get the index and scale it. */
7234 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7235 {
7236 case 0: u32EffAddr = pCtx->eax; break;
7237 case 1: u32EffAddr = pCtx->ecx; break;
7238 case 2: u32EffAddr = pCtx->edx; break;
7239 case 3: u32EffAddr = pCtx->ebx; break;
7240 case 4: u32EffAddr = 0; /*none */ break;
7241 case 5: u32EffAddr = pCtx->ebp; break;
7242 case 6: u32EffAddr = pCtx->esi; break;
7243 case 7: u32EffAddr = pCtx->edi; break;
7244 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7245 }
7246 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7247
7248 /* add base */
7249 switch (bSib & X86_SIB_BASE_MASK)
7250 {
7251 case 0: u32EffAddr += pCtx->eax; break;
7252 case 1: u32EffAddr += pCtx->ecx; break;
7253 case 2: u32EffAddr += pCtx->edx; break;
7254 case 3: u32EffAddr += pCtx->ebx; break;
7255 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7256 case 5:
7257 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7258 {
7259 u32EffAddr += pCtx->ebp;
7260 SET_SS_DEF();
7261 }
7262 else
7263 {
7264 uint32_t u32Disp;
7265 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7266 u32EffAddr += u32Disp;
7267 }
7268 break;
7269 case 6: u32EffAddr += pCtx->esi; break;
7270 case 7: u32EffAddr += pCtx->edi; break;
7271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7272 }
7273 break;
7274 }
7275 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7276 case 6: u32EffAddr = pCtx->esi; break;
7277 case 7: u32EffAddr = pCtx->edi; break;
7278 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7279 }
7280
7281 /* Get and add the displacement. */
7282 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7283 {
7284 case 0:
7285 break;
7286 case 1:
7287 {
7288 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7289 u32EffAddr += i8Disp;
7290 break;
7291 }
7292 case 2:
7293 {
7294 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7295 u32EffAddr += u32Disp;
7296 break;
7297 }
7298 default:
7299 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7300 }
7301
7302 }
7303 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7304 *pGCPtrEff = u32EffAddr;
7305 else
7306 {
7307 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7308 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7309 }
7310 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7311 return VINF_SUCCESS;
7312 }
7313
7314 case IEMMODE_64BIT:
7315 {
7316 uint64_t u64EffAddr;
7317
7318 /* Handle the rip+disp32 form with no registers first. */
7319 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7320 {
7321 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7322 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
7323 }
7324 else
7325 {
7326 /* Get the register (or SIB) value. */
7327 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7328 {
7329 case 0: u64EffAddr = pCtx->rax; break;
7330 case 1: u64EffAddr = pCtx->rcx; break;
7331 case 2: u64EffAddr = pCtx->rdx; break;
7332 case 3: u64EffAddr = pCtx->rbx; break;
7333 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7334 case 6: u64EffAddr = pCtx->rsi; break;
7335 case 7: u64EffAddr = pCtx->rdi; break;
7336 case 8: u64EffAddr = pCtx->r8; break;
7337 case 9: u64EffAddr = pCtx->r9; break;
7338 case 10: u64EffAddr = pCtx->r10; break;
7339 case 11: u64EffAddr = pCtx->r11; break;
7340 case 13: u64EffAddr = pCtx->r13; break;
7341 case 14: u64EffAddr = pCtx->r14; break;
7342 case 15: u64EffAddr = pCtx->r15; break;
7343 /* SIB */
7344 case 4:
7345 case 12:
7346 {
7347 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7348
7349 /* Get the index and scale it. */
7350 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7351 {
7352 case 0: u64EffAddr = pCtx->rax; break;
7353 case 1: u64EffAddr = pCtx->rcx; break;
7354 case 2: u64EffAddr = pCtx->rdx; break;
7355 case 3: u64EffAddr = pCtx->rbx; break;
7356 case 4: u64EffAddr = 0; /*none */ break;
7357 case 5: u64EffAddr = pCtx->rbp; break;
7358 case 6: u64EffAddr = pCtx->rsi; break;
7359 case 7: u64EffAddr = pCtx->rdi; break;
7360 case 8: u64EffAddr = pCtx->r8; break;
7361 case 9: u64EffAddr = pCtx->r9; break;
7362 case 10: u64EffAddr = pCtx->r10; break;
7363 case 11: u64EffAddr = pCtx->r11; break;
7364 case 12: u64EffAddr = pCtx->r12; break;
7365 case 13: u64EffAddr = pCtx->r13; break;
7366 case 14: u64EffAddr = pCtx->r14; break;
7367 case 15: u64EffAddr = pCtx->r15; break;
7368 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7369 }
7370 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7371
7372 /* add base */
7373 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7374 {
7375 case 0: u64EffAddr += pCtx->rax; break;
7376 case 1: u64EffAddr += pCtx->rcx; break;
7377 case 2: u64EffAddr += pCtx->rdx; break;
7378 case 3: u64EffAddr += pCtx->rbx; break;
7379 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7380 case 6: u64EffAddr += pCtx->rsi; break;
7381 case 7: u64EffAddr += pCtx->rdi; break;
7382 case 8: u64EffAddr += pCtx->r8; break;
7383 case 9: u64EffAddr += pCtx->r9; break;
7384 case 10: u64EffAddr += pCtx->r10; break;
7385 case 11: u64EffAddr += pCtx->r11; break;
 case 12: u64EffAddr += pCtx->r12; break;
7386 case 14: u64EffAddr += pCtx->r14; break;
7387 case 15: u64EffAddr += pCtx->r15; break;
7388 /* complicated encodings */
7389 case 5:
7390 case 13:
7391 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7392 {
7393 if (!pIemCpu->uRexB)
7394 {
7395 u64EffAddr += pCtx->rbp;
7396 SET_SS_DEF();
7397 }
7398 else
7399 u64EffAddr += pCtx->r13;
7400 }
7401 else
7402 {
7403 uint32_t u32Disp;
7404 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7405 u64EffAddr += (int32_t)u32Disp;
7406 }
7407 break;
7408 }
7409 break;
7410 }
7411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7412 }
7413
7414 /* Get and add the displacement. */
7415 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7416 {
7417 case 0:
7418 break;
7419 case 1:
7420 {
7421 int8_t i8Disp;
7422 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7423 u64EffAddr += i8Disp;
7424 break;
7425 }
7426 case 2:
7427 {
7428 uint32_t u32Disp;
7429 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7430 u64EffAddr += (int32_t)u32Disp;
7431 break;
7432 }
7433 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
7434 }
7435
7436 }
7437 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
7438 *pGCPtrEff = u64EffAddr;
7439 else
7440 *pGCPtrEff = u64EffAddr & UINT32_MAX;
7441 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7442 return VINF_SUCCESS;
7443 }
7444 }
7445
7446 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7447}
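
/* Worked example for the 16-bit branch above: bRm=0x42 decodes to mod=01,
 * rm=010, i.e. [bp+si+disp8] with the displacement sign-extended to 16 bits,
 * and because BP is involved the default segment becomes SS (via SET_SS_DEF)
 * unless a segment prefix was given. */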
7448
7449/** @} */
7450
7451
7452
7453/*
7454 * Include the instructions
7455 */
7456#include "IEMAllInstructions.cpp.h"
7457
7458
7459
7460
7461#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7462
7463/**
7464 * Sets up execution verification mode.
7465 */
7466static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
7467{
7468 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7469 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
7470
7471 /*
7472 * Always note down the address of the current instruction.
7473 */
7474 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
7475 pIemCpu->uOldRip = pOrgCtx->rip;
7476
7477 /*
7478 * Enable verification and/or logging.
7479 */
7480 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7481 if ( pIemCpu->fNoRem
7482 && ( 0
7483#if 0 /* auto enable on first paged protected mode interrupt */
7484 || ( pOrgCtx->eflags.Bits.u1IF
7485 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7486 && TRPMHasTrap(pVCpu)
7487 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
7488#endif
7489#if 0
7490 || ( pOrgCtx->cs == 0x10
7491 && ( pOrgCtx->rip == 0x90119e3e
7492 || pOrgCtx->rip == 0x901d9810) )
7493#endif
7494#if 0 /* Auto enable DSL - FPU stuff. */
7495 || ( pOrgCtx->cs == 0x10
7496 && (// pOrgCtx->rip == 0xc02ec07f
7497 //|| pOrgCtx->rip == 0xc02ec082
7498 //|| pOrgCtx->rip == 0xc02ec0c9
7499 0
7500 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
7501#endif
7502#if 0 /* Auto enable DSL - fstp st0 stuff. */
7503 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
7504#endif
7505#if 0
7506 || pOrgCtx->rip == 0x9022bb3a
7507#endif
7508#if 0
7509 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
7510#endif
7511#if 0
7512 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
7513 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
7514#endif
7515#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
7516 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
7517 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
7518 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
7519#endif
7520#if 0 /* NT4SP1 - xadd early boot. */
7521 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
7522#endif
7523#if 0 /* NT4SP1 - wrmsr (intel MSR). */
7524 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
7525#endif
7526#if 0 /* NT4SP1 - cmpxchg (AMD). */
7527 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
7528#endif
7529#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
7530 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
7531#endif
7532#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
7533 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
7534
7535#endif
7536#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
7537 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
7538
7539#endif
7540#if 0 /* NT4SP1 - frstor [ecx] */
7541 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
7542#endif
7543 )
7544 )
7545 {
7546 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7547 RTLogFlags(NULL, "enabled");
7548 pIemCpu->fNoRem = false;
7549 }
7550
7551 /*
7552 * Switch state.
7553 */
7554 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7555 {
7556 static CPUMCTX s_DebugCtx; /* Ugly! */
7557
7558 s_DebugCtx = *pOrgCtx;
7559 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7560 }
7561
7562 /*
7563 * See if there is an interrupt pending in TRPM and inject it if we can.
7564 */
7565 pIemCpu->uInjectCpl = UINT8_MAX;
7566 if ( pOrgCtx->eflags.Bits.u1IF
7567 && TRPMHasTrap(pVCpu)
7568 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7569 {
7570 uint8_t u8TrapNo;
7571 TRPMEVENT enmType;
7572 RTGCUINT uErrCode;
7573 RTGCPTR uCr2;
7574 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
7575 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7576 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7577 TRPMResetTrap(pVCpu);
7578 pIemCpu->uInjectCpl = pIemCpu->uCpl;
7579 }
7580
7581 /*
7582 * Reset the counters.
7583 */
7584 pIemCpu->cIOReads = 0;
7585 pIemCpu->cIOWrites = 0;
7586 pIemCpu->fIgnoreRaxRdx = false;
7587 pIemCpu->fOverlappingMovs = false;
7588 pIemCpu->fUndefinedEFlags = 0;
7589
7590 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7591 {
7592 /*
7593 * Free all verification records.
7594 */
7595 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7596 pIemCpu->pIemEvtRecHead = NULL;
7597 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7598 do
7599 {
7600 while (pEvtRec)
7601 {
7602 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7603 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7604 pIemCpu->pFreeEvtRec = pEvtRec;
7605 pEvtRec = pNext;
7606 }
7607 pEvtRec = pIemCpu->pOtherEvtRecHead;
7608 pIemCpu->pOtherEvtRecHead = NULL;
7609 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7610 } while (pEvtRec);
7611 }
7612}
7613
7614
7615/**
7616 * Allocate an event record.
7617 * @returns Pointer to a record.
7618 */
7619static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7620{
7621 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7622 return NULL;
7623
7624 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7625 if (pEvtRec)
7626 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7627 else
7628 {
7629 if (!pIemCpu->ppIemEvtRecNext)
7630 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7631
7632 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7633 if (!pEvtRec)
7634 return NULL;
7635 }
7636 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7637 pEvtRec->pNext = NULL;
7638 return pEvtRec;
7639}
7640
7641
7642/**
7643 * IOMMMIORead notification.
7644 */
7645VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7646{
7647 PVMCPU pVCpu = VMMGetCpu(pVM);
7648 if (!pVCpu)
7649 return;
7650 PIEMCPU pIemCpu = &pVCpu->iem.s;
7651 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7652 if (!pEvtRec)
7653 return;
7654 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7655 pEvtRec->u.RamRead.GCPhys = GCPhys;
7656 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7657 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7658 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7659}
7660
7661
7662/**
7663 * IOMMMIOWrite notification.
7664 */
7665VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7666{
7667 PVMCPU pVCpu = VMMGetCpu(pVM);
7668 if (!pVCpu)
7669 return;
7670 PIEMCPU pIemCpu = &pVCpu->iem.s;
7671 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7672 if (!pEvtRec)
7673 return;
7674 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7675 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7676 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7677 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7678 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7679 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7680 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7681 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7682 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7683}
7684
7685
7686/**
7687 * IOMIOPortRead notification.
7688 */
7689VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7690{
7691 PVMCPU pVCpu = VMMGetCpu(pVM);
7692 if (!pVCpu)
7693 return;
7694 PIEMCPU pIemCpu = &pVCpu->iem.s;
7695 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7696 if (!pEvtRec)
7697 return;
7698 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7699 pEvtRec->u.IOPortRead.Port = Port;
7700 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7701 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7702 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7703}
7704
7705/**
7706 * IOMIOPortWrite notification.
7707 */
7708VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7709{
7710 PVMCPU pVCpu = VMMGetCpu(pVM);
7711 if (!pVCpu)
7712 return;
7713 PIEMCPU pIemCpu = &pVCpu->iem.s;
7714 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7715 if (!pEvtRec)
7716 return;
7717 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7718 pEvtRec->u.IOPortWrite.Port = Port;
7719 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7720 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7721 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7722 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7723}
7724
7725
7726VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7727{
7728 AssertFailed();
7729}
7730
7731
7732VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7733{
7734 AssertFailed();
7735}
7736
7737
7738/**
7739 * Fakes and records an I/O port read.
7740 *
7741 * @returns VINF_SUCCESS.
7742 * @param pIemCpu The IEM per CPU data.
7743 * @param Port The I/O port.
7744 * @param pu32Value Where to store the fake value.
7745 * @param cbValue The size of the access.
7746 */
7747static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7748{
7749 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7750 if (pEvtRec)
7751 {
7752 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7753 pEvtRec->u.IOPortRead.Port = Port;
7754 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7755 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7756 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7757 }
7758 pIemCpu->cIOReads++;
7759 *pu32Value = 0xcccccccc;
7760 return VINF_SUCCESS;
7761}
7762
7763
7764/**
7765 * Fakes and records an I/O port write.
7766 *
7767 * @returns VINF_SUCCESS.
7768 * @param pIemCpu The IEM per CPU data.
7769 * @param Port The I/O port.
7770 * @param u32Value The value being written.
7771 * @param cbValue The size of the access.
7772 */
7773static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7774{
7775 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7776 if (pEvtRec)
7777 {
7778 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7779 pEvtRec->u.IOPortWrite.Port = Port;
7780 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7781 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7782 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7783 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7784 }
7785 pIemCpu->cIOWrites++;
7786 return VINF_SUCCESS;
7787}
7788
7789
7790/**
7791 * Used to add extra details about a stub case.
7792 * @param pIemCpu The IEM per CPU state.
7793 */
7794static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7795{
7796 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7797 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7798 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7799 char szRegs[4096];
7800 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7801 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7802 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7803 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7804 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7805 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7806 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7807 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7808 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7809 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7810 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7811 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7812 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7813 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7814 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7815 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7816 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7817 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7818 " efer=%016VR{efer}\n"
7819 " pat=%016VR{pat}\n"
7820 " sf_mask=%016VR{sf_mask}\n"
7821 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7822 " lstar=%016VR{lstar}\n"
7823 " star=%016VR{star} cstar=%016VR{cstar}\n"
7824 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7825 );
7826
7827 char szInstr1[256];
7828 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
7829 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7830 szInstr1, sizeof(szInstr1), NULL);
7831 char szInstr2[256];
7832 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
7833 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7834 szInstr2, sizeof(szInstr2), NULL);
7835
7836 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7837}
7838
7839
7840/**
7841 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7842 * dump to the assertion info.
7843 *
7844 * @param pEvtRec The record to dump.
7845 */
7846static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7847{
7848 switch (pEvtRec->enmEvent)
7849 {
7850 case IEMVERIFYEVENT_IOPORT_READ:
7851 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7852 pEvtRec->u.IOPortWrite.Port,
7853 pEvtRec->u.IOPortWrite.cbValue);
7854 break;
7855 case IEMVERIFYEVENT_IOPORT_WRITE:
7856 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7857 pEvtRec->u.IOPortWrite.Port,
7858 pEvtRec->u.IOPortWrite.cbValue,
7859 pEvtRec->u.IOPortWrite.u32Value);
7860 break;
7861 case IEMVERIFYEVENT_RAM_READ:
7862 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7863 pEvtRec->u.RamRead.GCPhys,
7864 pEvtRec->u.RamRead.cb);
7865 break;
7866 case IEMVERIFYEVENT_RAM_WRITE:
7867 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7868 pEvtRec->u.RamWrite.GCPhys,
7869 pEvtRec->u.RamWrite.cb,
7870 (int)pEvtRec->u.RamWrite.cb,
7871 pEvtRec->u.RamWrite.ab);
7872 break;
7873 default:
7874 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7875 break;
7876 }
7877}
7878
7879
7880/**
7881 * Raises an assertion on the two specified records, showing the given message
7882 * with the record dumps attached.
7883 *
7884 * @param pIemCpu The IEM per CPU data.
7885 * @param pEvtRec1 The first record.
7886 * @param pEvtRec2 The second record.
7887 * @param pszMsg The message explaining why we're asserting.
7888 */
7889static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7890{
7891 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7892 iemVerifyAssertAddRecordDump(pEvtRec1);
7893 iemVerifyAssertAddRecordDump(pEvtRec2);
7894 iemVerifyAssertMsg2(pIemCpu);
7895 RTAssertPanic();
7896}
7897
7898
7899/**
7900 * Raises an assertion on the specified record, showing the given message with
7901 * a record dump attached.
7902 *
7903 * @param pIemCpu The IEM per CPU data.
7904 * @param pEvtRec The record.
7905 * @param pszMsg The message explaining why we're asserting.
7906 */
7907static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7908{
7909 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7910 iemVerifyAssertAddRecordDump(pEvtRec);
7911 iemVerifyAssertMsg2(pIemCpu);
7912 RTAssertPanic();
7913}
7914
7915
7916/**
7917 * Verifies a write record.
7918 *
7919 * @param pIemCpu The IEM per CPU data.
7920 * @param pEvtRec The write record.
7921 */
7922static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7923{
7924 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7925 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7926 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7927 if ( RT_FAILURE(rc)
7928 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7929 {
7930 /* fend off ins */
7931 if ( !pIemCpu->cIOReads
7932 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7933 || ( pEvtRec->u.RamWrite.cb != 1
7934 && pEvtRec->u.RamWrite.cb != 2
7935 && pEvtRec->u.RamWrite.cb != 4) )
7936 {
7937 /* fend off ROMs */
7938 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7939 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7940 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7941 {
7942 /* fend off fxsave */
7943 if (pEvtRec->u.RamWrite.cb != 512)
7944 {
7945 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7946 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
7947 RTAssertMsg2Add("REM: %.*Rhxs\n"
7948 "IEM: %.*Rhxs\n",
7949 pEvtRec->u.RamWrite.cb, abBuf,
7950 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7951 iemVerifyAssertAddRecordDump(pEvtRec);
7952 iemVerifyAssertMsg2(pIemCpu);
7953 RTAssertPanic();
7954 }
7955 }
7956 }
7957 }
7958
7959}
7960
7961/**
7962 * Performs the post-execution verification checks.
7963 */
7964static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7965{
7966 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7967 return;
7968
7969 /*
7970 * Switch back the state.
7971 */
7972 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7973 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7974 Assert(pOrgCtx != pDebugCtx);
7975 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7976
7977 /*
7978 * Execute the instruction in REM.
7979 */
7980 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7981 EMRemLock(pVM);
7982 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7983 AssertRC(rc);
7984 EMRemUnlock(pVM);
7985
7986 /*
7987 * Compare the register states.
7988 */
7989 unsigned cDiffs = 0;
7990 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7991 {
7992 //Log(("REM and IEM ends up with different registers!\n"));
7993
7994# define CHECK_FIELD(a_Field) \
7995 do \
7996 { \
7997 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7998 { \
7999 switch (sizeof(pOrgCtx->a_Field)) \
8000 { \
8001 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8002 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8003 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8004 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8005 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
8006 } \
8007 cDiffs++; \
8008 } \
8009 } while (0)
8010
8011# define CHECK_BIT_FIELD(a_Field) \
8012 do \
8013 { \
8014 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8015 { \
8016 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8017 cDiffs++; \
8018 } \
8019 } while (0)
8020
8021# define CHECK_SEL(a_Sel) \
8022 do \
8023 { \
8024 CHECK_FIELD(a_Sel.Sel); \
8025 CHECK_FIELD(a_Sel.Attr.u); \
8026 CHECK_FIELD(a_Sel.u64Base); \
8027 CHECK_FIELD(a_Sel.u32Limit); \
8028 CHECK_FIELD(a_Sel.fFlags); \
8029 } while (0)
8030
8031#if 1 /* The recompiler doesn't update these the intel way. */
8032 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8033 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8034 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8035 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8036 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8037 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8038 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8039 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8040 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8041 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8042#endif
8043 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8044 {
8045 RTAssertMsg2Weak(" the FPU state differs\n");
8046 cDiffs++;
8047 CHECK_FIELD(fpu.FCW);
8048 CHECK_FIELD(fpu.FSW);
8049 CHECK_FIELD(fpu.FTW);
8050 CHECK_FIELD(fpu.FOP);
8051 CHECK_FIELD(fpu.FPUIP);
8052 CHECK_FIELD(fpu.CS);
8053 CHECK_FIELD(fpu.Rsrvd1);
8054 CHECK_FIELD(fpu.FPUDP);
8055 CHECK_FIELD(fpu.DS);
8056 CHECK_FIELD(fpu.Rsrvd2);
8057 CHECK_FIELD(fpu.MXCSR);
8058 CHECK_FIELD(fpu.MXCSR_MASK);
8059 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8060 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8061 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8062 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8063 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8064 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8065 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8066 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8067 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8068 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8069 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8070 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8071 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8072 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8073 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8074 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8075 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8076 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8077 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8078 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8079 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8080 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8081 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8082 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8083 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8084 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8085 }
8086 CHECK_FIELD(rip);
8087 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8088 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8089 {
8090 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8091 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8092 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8093 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8094 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8095 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8096 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8097 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8098 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8099 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8100 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8101 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8102 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8103 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8104 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8105 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8106 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8107 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8108 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8109 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8110 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8111 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8112 }
8113
8114 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8115 CHECK_FIELD(rax);
8116 CHECK_FIELD(rcx);
8117 if (!pIemCpu->fIgnoreRaxRdx)
8118 CHECK_FIELD(rdx);
8119 CHECK_FIELD(rbx);
8120 CHECK_FIELD(rsp);
8121 CHECK_FIELD(rbp);
8122 CHECK_FIELD(rsi);
8123 CHECK_FIELD(rdi);
8124 CHECK_FIELD(r8);
8125 CHECK_FIELD(r9);
8126 CHECK_FIELD(r10);
8127 CHECK_FIELD(r11);
8128 CHECK_FIELD(r12);
8129 CHECK_FIELD(r13);
     CHECK_FIELD(r14);
     CHECK_FIELD(r15);
8130 CHECK_SEL(cs);
8131 CHECK_SEL(ss);
8132 CHECK_SEL(ds);
8133 CHECK_SEL(es);
8134 CHECK_SEL(fs);
8135 CHECK_SEL(gs);
8136 CHECK_FIELD(cr0);
8137 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
8138 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
8139 /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
8140 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
8141 if (pOrgCtx->cr2 != pDebugCtx->cr2)
8142 {
8143 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
8144 { /* ignore */ }
8145 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
8146 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
8147 { /* ignore */ }
8148 else
8149 CHECK_FIELD(cr2);
8150 }
8151 CHECK_FIELD(cr3);
8152 CHECK_FIELD(cr4);
8153 CHECK_FIELD(dr[0]);
8154 CHECK_FIELD(dr[1]);
8155 CHECK_FIELD(dr[2]);
8156 CHECK_FIELD(dr[3]);
8157 CHECK_FIELD(dr[6]);
8158 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
8159 CHECK_FIELD(dr[7]);
8160 CHECK_FIELD(gdtr.cbGdt);
8161 CHECK_FIELD(gdtr.pGdt);
8162 CHECK_FIELD(idtr.cbIdt);
8163 CHECK_FIELD(idtr.pIdt);
8164 CHECK_SEL(ldtr);
8165 CHECK_SEL(tr);
8166 CHECK_FIELD(SysEnter.cs);
8167 CHECK_FIELD(SysEnter.eip);
8168 CHECK_FIELD(SysEnter.esp);
8169 CHECK_FIELD(msrEFER);
8170 CHECK_FIELD(msrSTAR);
8171 CHECK_FIELD(msrPAT);
8172 CHECK_FIELD(msrLSTAR);
8173 CHECK_FIELD(msrCSTAR);
8174 CHECK_FIELD(msrSFMASK);
8175 CHECK_FIELD(msrKERNELGSBASE);
8176
8177 if (cDiffs != 0)
8178 {
8179 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
8180 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8181 iemVerifyAssertMsg2(pIemCpu);
8182 RTAssertPanic();
8183 }
8184# undef CHECK_FIELD
8185# undef CHECK_BIT_FIELD
8186 }
8187
8188 /*
8189 * If the register state compared fine, check the verification event
8190 * records.
8191 */
8192 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8193 {
8194 /*
8195 * Compare verification event records.
8196 * - I/O port accesses should be a 1:1 match.
8197 */
8198 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8199 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8200 while (pIemRec && pOtherRec)
8201 {
8202 /* Since we might miss RAM writes and reads, ignore reads and verify
8203 that any extra write records match what is actually in guest RAM. */
8204 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8205 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8206 && pIemRec->pNext)
8207 {
8208 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8209 iemVerifyWriteRecord(pIemCpu, pIemRec);
8210 pIemRec = pIemRec->pNext;
8211 }
8212
8213 /* Do the compare. */
8214 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8215 {
8216 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8217 break;
8218 }
8219 bool fEquals;
8220 switch (pIemRec->enmEvent)
8221 {
8222 case IEMVERIFYEVENT_IOPORT_READ:
8223 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8224 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8225 break;
8226 case IEMVERIFYEVENT_IOPORT_WRITE:
8227 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8228 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8229 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8230 break;
8231 case IEMVERIFYEVENT_RAM_READ:
8232 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8233 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8234 break;
8235 case IEMVERIFYEVENT_RAM_WRITE:
8236 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8237 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8238 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8239 break;
8240 default:
8241 fEquals = false;
8242 break;
8243 }
8244 if (!fEquals)
8245 {
8246 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8247 break;
8248 }
8249
8250 /* advance */
8251 pIemRec = pIemRec->pNext;
8252 pOtherRec = pOtherRec->pNext;
8253 }
8254
8255 /* Ignore extra writes and reads. */
8256 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8257 {
8258 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8259 iemVerifyWriteRecord(pIemCpu, pIemRec);
8260 pIemRec = pIemRec->pNext;
8261 }
8262 if (pIemRec != NULL)
8263 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8264 else if (pOtherRec != NULL)
8265 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra Other record!");
8266 }
8267 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8268}
8269
8270#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8271
8272/* stubs */
8273static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8274{
8275 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8276 return VERR_INTERNAL_ERROR;
8277}
8278
8279static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8280{
8281 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8282 return VERR_INTERNAL_ERROR;
8283}
8284
8285#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8286
8287
8288/**
8289 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8290 * IEMExecOneWithPrefetchedByPC.
8291 *
8292 * @return Strict VBox status code.
8293 * @param pVCpu The current virtual CPU.
8294 * @param pIemCpu The IEM per CPU data.
8295 * @param fExecuteInhibit If set, execute the instruction following CLI,
8296 * POP SS and MOV SS,GR.
8297 */
8298DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
8299{
8300 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8301 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8302 if (rcStrict == VINF_SUCCESS)
8303 pIemCpu->cInstructions++;
8304//#ifdef DEBUG
8305// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8306//#endif
8307
8308 /* Execute the next instruction as well if a cli, pop ss or
8309 mov ss, Gr has just completed successfully. */
8310 if ( fExecuteInhibit
8311 && rcStrict == VINF_SUCCESS
8312 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8313 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8314 {
8315 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
8316 if (rcStrict == VINF_SUCCESS)
8317 {
8318 IEM_OPCODE_GET_NEXT_U8(&b);
8319 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8320 if (rcStrict == VINF_SUCCESS)
8321 pIemCpu->cInstructions++;
8322 }
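 /* Clear the interrupt inhibition by loading a PC value that no guest RIP should ever match. */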
8323 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8324 }
8325
8326 /*
8327 * Return value fiddling and statistics.
8328 */
8329 if (rcStrict != VINF_SUCCESS)
8330 {
8331 if (RT_SUCCESS(rcStrict))
8332 {
8333 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8334 int32_t const rcPassUp = pIemCpu->rcPassUp;
8335 if (rcPassUp == VINF_SUCCESS)
8336 pIemCpu->cRetInfStatuses++;
8337 else if ( rcPassUp < VINF_EM_FIRST
8338 || rcPassUp > VINF_EM_LAST
8339 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8340 {
8341 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8342 pIemCpu->cRetPassUpStatus++;
8343 rcStrict = rcPassUp;
8344 }
8345 else
8346 {
8347 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8348 pIemCpu->cRetInfStatuses++;
8349 }
8350 }
8351 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8352 pIemCpu->cRetAspectNotImplemented++;
8353 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8354 pIemCpu->cRetInstrNotImplemented++;
8355#ifdef IEM_VERIFICATION_MODE_FULL
8356 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8357 rcStrict = VINF_SUCCESS;
8358#endif
8359 else
8360 pIemCpu->cRetErrStatuses++;
8361 }
8362 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8363 {
8364 pIemCpu->cRetPassUpStatus++;
8365 rcStrict = pIemCpu->rcPassUp;
8366 }
8367
8368 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8369 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8370#if defined(IEM_VERIFICATION_MODE_FULL)
8371 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8372 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8373 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8374 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8375#endif
8376 return rcStrict;
8377}
8378
8379
8380#ifdef IN_RC
8381/**
8382 * Re-enters raw-mode or ensures we return to ring-3.
8383 *
8384 * @returns rcStrict, maybe modified.
8385 * @param pIemCpu The IEM CPU structure.
8386 * @param pVCpu The cross context virtual CPU structure of the caller.
8387 * @param pCtx The current CPU context.
8388 * @param rcStrict The status code returned by the interpreter.
8389 */
8390DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
8391{
8392 if (!pIemCpu->fInPatchCode)
8393 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
8394 return rcStrict;
8395}
8396#endif
8397
8398
8399/**
8400 * Execute one instruction.
8401 *
8402 * @return Strict VBox status code.
8403 * @param pVCpu The current virtual CPU.
8404 */
8405VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
8406{
8407 PIEMCPU pIemCpu = &pVCpu->iem.s;
8408
8409#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8410 iemExecVerificationModeSetup(pIemCpu);
8411#endif
8412#ifdef LOG_ENABLED
8413 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8414# ifdef IN_RING3
8415 if (LogIs2Enabled())
8416 {
8417 char szInstr[256];
8418 uint32_t cbInstr = 0;
8419 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8420 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8421 szInstr, sizeof(szInstr), &cbInstr);
8422
8423 Log3(("**** "
8424 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8425 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8426 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8427 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8428 " %s\n"
8429 ,
8430 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8431 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8432 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8433 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8434 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8435 szInstr));
8436
8437 if (LogIs3Enabled())
8438 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
8439 }
8440 else
8441# endif
8442 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8443 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8444#endif
8445
8446 /*
8447 * Do the decoding and emulation.
8448 */
8449 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8450 if (rcStrict == VINF_SUCCESS)
8451 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8452
8453#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8454 /*
8455 * Assert some sanity.
8456 */
8457 iemExecVerificationModeCheck(pIemCpu);
8458#endif
8459#ifdef IN_RC
8460 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8461#endif
8462 if (rcStrict != VINF_SUCCESS)
8463 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8464 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8465 return rcStrict;
8466}
8467
8468
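/**
 * Executes one instruction, like IEMExecOne, and reports how many bytes of
 * guest memory the instruction wrote.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must match the internal CPU context.
 * @param   pcbWritten  Where to return the number of bytes written by the
 *                      instruction.  Optional.
 */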
8469VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8470{
8471 PIEMCPU pIemCpu = &pVCpu->iem.s;
8472 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8473 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8474
8475 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8476 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8477 if (rcStrict == VINF_SUCCESS)
8478 {
8479 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8480 if (pcbWritten)
8481 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8482 }
8483
8484#ifdef IN_RC
8485 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8486#endif
8487 return rcStrict;
8488}
8489
8490
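/**
 * Executes one instruction, like IEMExecOne, but uses caller supplied opcode
 * bytes when they were fetched at the current RIP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must match the internal CPU context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched from.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched opcode bytes.
 */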
8491VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8492 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8493{
8494 PIEMCPU pIemCpu = &pVCpu->iem.s;
8495 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8496 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8497
8498 VBOXSTRICTRC rcStrict;
8499 if ( cbOpcodeBytes
8500 && pCtx->rip == OpcodeBytesPC)
8501 {
8502 iemInitDecoder(pIemCpu, false);
8503 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8504 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8505 rcStrict = VINF_SUCCESS;
8506 }
8507 else
8508 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8509 if (rcStrict == VINF_SUCCESS)
8510 {
8511 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8512 }
8513
8514#ifdef IN_RC
8515 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8516#endif
8517 return rcStrict;
8518}
8519
8520
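/**
 * Executes one instruction like IEMExecOneEx, but with handler bypassing
 * enabled and without executing the instruction following CLI, POP SS or
 * MOV SS.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must match the internal CPU context.
 * @param   pcbWritten  Where to return the number of bytes written by the
 *                      instruction.  Optional.
 */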
8521VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8522{
8523 PIEMCPU pIemCpu = &pVCpu->iem.s;
8524 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8525 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8526
8527 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8528 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8529 if (rcStrict == VINF_SUCCESS)
8530 {
8531 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8532 if (pcbWritten)
8533 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8534 }
8535
8536#ifdef IN_RC
8537 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8538#endif
8539 return rcStrict;
8540}
8541
8542
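/**
 * Combination of IEMExecOneBypassEx and IEMExecOneWithPrefetchedByPC: handler
 * bypassing is enabled and the caller supplied opcode bytes are used when they
 * were fetched at the current RIP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must match the internal CPU context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched from.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched opcode bytes.
 */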
8543VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8544 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8545{
8546 PIEMCPU pIemCpu = &pVCpu->iem.s;
8547 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8548 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8549
8550 VBOXSTRICTRC rcStrict;
8551 if ( cbOpcodeBytes
8552 && pCtx->rip == OpcodeBytesPC)
8553 {
8554 iemInitDecoder(pIemCpu, true);
8555 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8556 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8557 rcStrict = VINF_SUCCESS;
8558 }
8559 else
8560 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8561 if (rcStrict == VINF_SUCCESS)
8562 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8563
8564#ifdef IN_RC
8565 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8566#endif
8567 return rcStrict;
8568}
8569
8570
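/**
 * Executes as many instructions as possible.
 *
 * Currently this simply defers to IEMExecOne, so exactly one instruction is
 * executed per call.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The current virtual CPU.
 */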
8571VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
8572{
8573 return IEMExecOne(pVCpu);
8574}
8575
8576
8577
8578/**
8579 * Injects a trap, fault, abort, software interrupt or external interrupt.
8580 *
8581 * The parameter list matches TRPMQueryTrapAll pretty closely.
8582 *
8583 * @returns Strict VBox status code.
8584 * @param pVCpu The current virtual CPU.
8585 * @param u8TrapNo The trap number.
8586 * @param enmType What type is it (trap/fault/abort), software
8587 * interrupt or hardware interrupt.
8588 * @param uErrCode The error code if applicable.
8589 * @param uCr2 The CR2 value if applicable.
8590 */
8591VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
8592{
8593 iemInitDecoder(&pVCpu->iem.s, false);
8594
8595 uint32_t fFlags;
8596 switch (enmType)
8597 {
8598 case TRPM_HARDWARE_INT:
8599 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
8600 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
8601 uErrCode = uCr2 = 0;
8602 break;
8603
8604 case TRPM_SOFTWARE_INT:
8605 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
8606 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
8607 uErrCode = uCr2 = 0;
8608 break;
8609
8610 case TRPM_TRAP:
8611 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
8612 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
8613 if (u8TrapNo == X86_XCPT_PF)
8614 fFlags |= IEM_XCPT_FLAGS_CR2;
8615 switch (u8TrapNo)
8616 {
8617 case X86_XCPT_DF:
8618 case X86_XCPT_TS:
8619 case X86_XCPT_NP:
8620 case X86_XCPT_SS:
8621 case X86_XCPT_PF:
8622 case X86_XCPT_AC:
8623 fFlags |= IEM_XCPT_FLAGS_ERR;
8624 break;
8625 }
8626 break;
8627
8628 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8629 }
8630
8631 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
8632}
8633
8634
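/**
 * Interface for setting an IEM breakpoint at a guest address.  Not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM      The VM handle.
 * @param   GCPtrBp  The guest address of the breakpoint.
 */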
8635VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
8636{
8637 return VERR_NOT_IMPLEMENTED;
8638}
8639
8640
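/**
 * Interface for clearing an IEM breakpoint at a guest address.  Not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM      The VM handle.
 * @param   GCPtrBp  The guest address of the breakpoint.
 */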
8641VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8642{
8643 return VERR_NOT_IMPLEMENTED;
8644}
8645
8646
8647#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8648/**
8649 * Executes an IRET instruction with default operand size.
8650 *
8651 * This is for PATM.
8652 *
8653 * @returns VBox status code.
8654 * @param pVCpu The current virtual CPU.
8655 * @param pCtxCore The register frame.
8656 */
8657VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8658{
8659 PIEMCPU pIemCpu = &pVCpu->iem.s;
8660 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8661
8662 iemCtxCoreToCtx(pCtx, pCtxCore);
8663 iemInitDecoder(pIemCpu);
8664 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8665 if (rcStrict == VINF_SUCCESS)
8666 iemCtxToCtxCore(pCtxCore, pCtx);
8667 else
8668 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8669 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8670 return rcStrict;
8671}
8672#endif
8673