VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@41829

Last change on this file since 41829 was 41829, checked in by vboxsync, 12 years ago

IEM: Implemented IEMExecOneWithPrefetchedByPC and IEMExecOneEx.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 289.8 KB
1/* $Id: IEMAll.cpp 41829 2012-06-19 14:39:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Additional exception details, basic enter/exit IEM
65 * state info.
66 * - Level 2 (Log2): ?
67 * - Level 3 (Log3): More detailed enter/exit IEM state info.
68 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
69 * - Level 5 (Log5): Decoding details.
70 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
71 *
72 */
73
74/*******************************************************************************
75* Header Files *
76*******************************************************************************/
77#define LOG_GROUP LOG_GROUP_IEM
78#include <VBox/vmm/iem.h>
79#include <VBox/vmm/pgm.h>
80#include <VBox/vmm/iom.h>
81#include <VBox/vmm/em.h>
82#include <VBox/vmm/tm.h>
83#include <VBox/vmm/dbgf.h>
84#ifdef IEM_VERIFICATION_MODE
85# include <VBox/vmm/rem.h>
86# include <VBox/vmm/mm.h>
87#endif
88#include "IEMInternal.h"
89#include <VBox/vmm/vm.h>
90#include <VBox/log.h>
91#include <VBox/err.h>
92#include <VBox/param.h>
93#include <iprt/assert.h>
94#include <iprt/string.h>
95#include <iprt/x86.h>
96
97
98/*******************************************************************************
99* Structures and Typedefs *
100*******************************************************************************/
101/** @typedef PFNIEMOP
102 * Pointer to an opcode decoder function.
103 */
104
105/** @def FNIEMOP_DEF
106 * Define an opcode decoder function.
107 *
108 * We're using macros for this so that adding and removing parameters as well as
109 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
110 *
111 * @param a_Name The function name.
112 */
113
114
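/*
 * Illustrative sketch (not part of the original file): an opcode decoder
 * defined with FNIEMOP_DEF and dispatched via FNIEMOP_CALL could look roughly
 * like this.  The function name and the 0x90 table slot are assumptions made
 * purely for the example.
 *
 *     FNIEMOP_DEF(iemOp_example_nop)       // hypothetical NOP decoder
 *     {
 *         // A real decoder would also advance RIP and commit guest state.
 *         return VINF_SUCCESS;
 *     }
 *
 *     // Dispatching from the one-byte opcode table:
 *     //     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[0x90]);
 */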
115#if defined(__GNUC__) && defined(RT_ARCH_X86)
116typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
117# define FNIEMOP_DEF(a_Name) \
118 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
119# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
120 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
121# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
122 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
123
124#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
125typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
126# define FNIEMOP_DEF(a_Name) \
127 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
128# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
129 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
130# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
131 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
132
133#elif defined(__GNUC__)
134typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
135# define FNIEMOP_DEF(a_Name) \
136 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
137# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
138 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
139# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
140 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
141
142#else
143typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
144# define FNIEMOP_DEF(a_Name) \
145 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
146# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
147 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
148# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
149 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
150
151#endif
152
153
154/**
155 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
156 */
157typedef union IEMSELDESC
158{
159 /** The legacy view. */
160 X86DESC Legacy;
161 /** The long mode view. */
162 X86DESC64 Long;
163} IEMSELDESC;
164/** Pointer to a selector descriptor table entry. */
165typedef IEMSELDESC *PIEMSELDESC;
166
167
168/*******************************************************************************
169* Defined Constants And Macros *
170*******************************************************************************/
171/** @name IEM status codes.
172 *
173 * Not quite sure how this will play out in the end, just aliasing safe status
174 * codes for now.
175 *
176 * @{ */
177#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
178/** @} */
179
180/** Temporary hack to disable the double execution. Will be removed in favor
181 * of a dedicated execution mode in EM. */
182//#define IEM_VERIFICATION_MODE_NO_REM
183
184/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
185 * due to GCC lacking knowledge about the value range of a switch. */
186#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
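/*
 * Illustrative usage (not part of the original file): the macro expands to a
 * complete 'default:' label, so a size switch can simply end with it:
 *
 *     switch (pIemCpu->enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: // ... handle 16-bit operand size ...
 *             break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET(); // asserts and returns VERR_IPE_NOT_REACHED_DEFAULT_CASE
 *     }
 */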
187
188/**
189 * Call an opcode decoder function.
190 *
191 * We're using macros for this so that adding and removing parameters can be
192 * done as we please. See FNIEMOP_DEF.
193 */
194#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
195
196/**
197 * Call a common opcode decoder function taking one extra argument.
198 *
199 * We're using macros for this so that adding and removing parameters can be
200 * done as we please. See FNIEMOP_DEF_1.
201 */
202#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
203
204/**
205 * Call a common opcode decoder function taking two extra arguments.
206 *
207 * We're using macros for this so that adding and removing parameters can be
208 * done as we please. See FNIEMOP_DEF_2.
209 */
210#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
211
212/**
213 * Check if we're currently executing in real or virtual 8086 mode.
214 *
215 * @returns @c true if it is, @c false if not.
216 * @param a_pIemCpu The IEM state of the current CPU.
217 */
218#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
219
220/**
221 * Check if we're currently executing in long mode.
222 *
223 * @returns @c true if it is, @c false if not.
224 * @param a_pIemCpu The IEM state of the current CPU.
225 */
226#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
227
228/**
229 * Check if we're currently executing in real mode.
230 *
231 * @returns @c true if it is, @c false if not.
232 * @param a_pIemCpu The IEM state of the current CPU.
233 */
234#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
235
236/**
237 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
238 */
239#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
240
241/**
242 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
243 */
244#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
245
246/**
247 * Tests if at least one of the specified AMD CPUID features (extended) is
248 * marked present.
249 */
250#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
251
252/**
253 * Checks if an Intel CPUID feature is present.
254 */
255#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
256 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
257 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
258
259/**
260 * Check if the address is canonical.
261 */
262#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
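/* Worked example (added note, not in the original source): an address is
 * canonical when bits 63:48 replicate bit 47.  Adding 2^47 folds both
 * canonical halves into a single range below 2^48:
 *     0x00007FFFFFFFFFFF + 0x800000000000 = 0x0000FFFFFFFFFFFF -> canonical
 *     0xFFFF800000000000 + 0x800000000000 = 0 (wraps)          -> canonical
 *     0x0000800000000000 + 0x800000000000 = 0x0001000000000000 -> not canonical */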
263
264
265/*******************************************************************************
266* Global Variables *
267*******************************************************************************/
268extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
269
270
271/** Function table for the ADD instruction. */
272static const IEMOPBINSIZES g_iemAImpl_add =
273{
274 iemAImpl_add_u8, iemAImpl_add_u8_locked,
275 iemAImpl_add_u16, iemAImpl_add_u16_locked,
276 iemAImpl_add_u32, iemAImpl_add_u32_locked,
277 iemAImpl_add_u64, iemAImpl_add_u64_locked
278};
279
280/** Function table for the ADC instruction. */
281static const IEMOPBINSIZES g_iemAImpl_adc =
282{
283 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
284 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
285 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
286 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
287};
288
289/** Function table for the SUB instruction. */
290static const IEMOPBINSIZES g_iemAImpl_sub =
291{
292 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
293 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
294 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
295 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
296};
297
298/** Function table for the SBB instruction. */
299static const IEMOPBINSIZES g_iemAImpl_sbb =
300{
301 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
302 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
303 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
304 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
305};
306
307/** Function table for the OR instruction. */
308static const IEMOPBINSIZES g_iemAImpl_or =
309{
310 iemAImpl_or_u8, iemAImpl_or_u8_locked,
311 iemAImpl_or_u16, iemAImpl_or_u16_locked,
312 iemAImpl_or_u32, iemAImpl_or_u32_locked,
313 iemAImpl_or_u64, iemAImpl_or_u64_locked
314};
315
316/** Function table for the XOR instruction. */
317static const IEMOPBINSIZES g_iemAImpl_xor =
318{
319 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
320 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
321 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
322 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
323};
324
325/** Function table for the AND instruction. */
326static const IEMOPBINSIZES g_iemAImpl_and =
327{
328 iemAImpl_and_u8, iemAImpl_and_u8_locked,
329 iemAImpl_and_u16, iemAImpl_and_u16_locked,
330 iemAImpl_and_u32, iemAImpl_and_u32_locked,
331 iemAImpl_and_u64, iemAImpl_and_u64_locked
332};
333
334/** Function table for the CMP instruction.
335 * @remarks Making operand order ASSUMPTIONS.
336 */
337static const IEMOPBINSIZES g_iemAImpl_cmp =
338{
339 iemAImpl_cmp_u8, NULL,
340 iemAImpl_cmp_u16, NULL,
341 iemAImpl_cmp_u32, NULL,
342 iemAImpl_cmp_u64, NULL
343};
344
345/** Function table for the TEST instruction.
346 * @remarks Making operand order ASSUMPTIONS.
347 */
348static const IEMOPBINSIZES g_iemAImpl_test =
349{
350 iemAImpl_test_u8, NULL,
351 iemAImpl_test_u16, NULL,
352 iemAImpl_test_u32, NULL,
353 iemAImpl_test_u64, NULL
354};
355
356/** Function table for the BT instruction. */
357static const IEMOPBINSIZES g_iemAImpl_bt =
358{
359 NULL, NULL,
360 iemAImpl_bt_u16, NULL,
361 iemAImpl_bt_u32, NULL,
362 iemAImpl_bt_u64, NULL
363};
364
365/** Function table for the BTC instruction. */
366static const IEMOPBINSIZES g_iemAImpl_btc =
367{
368 NULL, NULL,
369 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
370 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
371 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
372};
373
374/** Function table for the BTR instruction. */
375static const IEMOPBINSIZES g_iemAImpl_btr =
376{
377 NULL, NULL,
378 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
379 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
380 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
381};
382
383/** Function table for the BTS instruction. */
384static const IEMOPBINSIZES g_iemAImpl_bts =
385{
386 NULL, NULL,
387 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
388 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
389 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
390};
391
392/** Function table for the BSF instruction. */
393static const IEMOPBINSIZES g_iemAImpl_bsf =
394{
395 NULL, NULL,
396 iemAImpl_bsf_u16, NULL,
397 iemAImpl_bsf_u32, NULL,
398 iemAImpl_bsf_u64, NULL
399};
400
401/** Function table for the BSR instruction. */
402static const IEMOPBINSIZES g_iemAImpl_bsr =
403{
404 NULL, NULL,
405 iemAImpl_bsr_u16, NULL,
406 iemAImpl_bsr_u32, NULL,
407 iemAImpl_bsr_u64, NULL
408};
409
410/** Function table for the IMUL instruction. */
411static const IEMOPBINSIZES g_iemAImpl_imul_two =
412{
413 NULL, NULL,
414 iemAImpl_imul_two_u16, NULL,
415 iemAImpl_imul_two_u32, NULL,
416 iemAImpl_imul_two_u64, NULL
417};
418
419/** Group 1 /r lookup table. */
420static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
421{
422 &g_iemAImpl_add,
423 &g_iemAImpl_or,
424 &g_iemAImpl_adc,
425 &g_iemAImpl_sbb,
426 &g_iemAImpl_and,
427 &g_iemAImpl_sub,
428 &g_iemAImpl_xor,
429 &g_iemAImpl_cmp
430};
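/*
 * Illustrative sketch (not part of the original file): the table above is
 * indexed by the ModR/M reg field (bits 5:3), so a group 1 decoder could pick
 * its worker like this.  The variable names are assumptions for the example.
 *
 *     uint8_t bRm;                          // ModR/M byte, already fetched
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */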
431
432/** Function table for the INC instruction. */
433static const IEMOPUNARYSIZES g_iemAImpl_inc =
434{
435 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
436 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
437 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
438 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
439};
440
441/** Function table for the DEC instruction. */
442static const IEMOPUNARYSIZES g_iemAImpl_dec =
443{
444 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
445 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
446 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
447 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
448};
449
450/** Function table for the NEG instruction. */
451static const IEMOPUNARYSIZES g_iemAImpl_neg =
452{
453 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
454 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
455 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
456 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
457};
458
459/** Function table for the NOT instruction. */
460static const IEMOPUNARYSIZES g_iemAImpl_not =
461{
462 iemAImpl_not_u8, iemAImpl_not_u8_locked,
463 iemAImpl_not_u16, iemAImpl_not_u16_locked,
464 iemAImpl_not_u32, iemAImpl_not_u32_locked,
465 iemAImpl_not_u64, iemAImpl_not_u64_locked
466};
467
468
469/** Function table for the ROL instruction. */
470static const IEMOPSHIFTSIZES g_iemAImpl_rol =
471{
472 iemAImpl_rol_u8,
473 iemAImpl_rol_u16,
474 iemAImpl_rol_u32,
475 iemAImpl_rol_u64
476};
477
478/** Function table for the ROR instruction. */
479static const IEMOPSHIFTSIZES g_iemAImpl_ror =
480{
481 iemAImpl_ror_u8,
482 iemAImpl_ror_u16,
483 iemAImpl_ror_u32,
484 iemAImpl_ror_u64
485};
486
487/** Function table for the RCL instruction. */
488static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
489{
490 iemAImpl_rcl_u8,
491 iemAImpl_rcl_u16,
492 iemAImpl_rcl_u32,
493 iemAImpl_rcl_u64
494};
495
496/** Function table for the RCR instruction. */
497static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
498{
499 iemAImpl_rcr_u8,
500 iemAImpl_rcr_u16,
501 iemAImpl_rcr_u32,
502 iemAImpl_rcr_u64
503};
504
505/** Function table for the SHL instruction. */
506static const IEMOPSHIFTSIZES g_iemAImpl_shl =
507{
508 iemAImpl_shl_u8,
509 iemAImpl_shl_u16,
510 iemAImpl_shl_u32,
511 iemAImpl_shl_u64
512};
513
514/** Function table for the SHR instruction. */
515static const IEMOPSHIFTSIZES g_iemAImpl_shr =
516{
517 iemAImpl_shr_u8,
518 iemAImpl_shr_u16,
519 iemAImpl_shr_u32,
520 iemAImpl_shr_u64
521};
522
523/** Function table for the SAR instruction. */
524static const IEMOPSHIFTSIZES g_iemAImpl_sar =
525{
526 iemAImpl_sar_u8,
527 iemAImpl_sar_u16,
528 iemAImpl_sar_u32,
529 iemAImpl_sar_u64
530};
531
532
533/** Function table for the MUL instruction. */
534static const IEMOPMULDIVSIZES g_iemAImpl_mul =
535{
536 iemAImpl_mul_u8,
537 iemAImpl_mul_u16,
538 iemAImpl_mul_u32,
539 iemAImpl_mul_u64
540};
541
542/** Function table for the IMUL instruction working implicitly on rAX. */
543static const IEMOPMULDIVSIZES g_iemAImpl_imul =
544{
545 iemAImpl_imul_u8,
546 iemAImpl_imul_u16,
547 iemAImpl_imul_u32,
548 iemAImpl_imul_u64
549};
550
551/** Function table for the DIV instruction. */
552static const IEMOPMULDIVSIZES g_iemAImpl_div =
553{
554 iemAImpl_div_u8,
555 iemAImpl_div_u16,
556 iemAImpl_div_u32,
557 iemAImpl_div_u64
558};
559
560/** Function table for the IDIV instruction. */
561static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
562{
563 iemAImpl_idiv_u8,
564 iemAImpl_idiv_u16,
565 iemAImpl_idiv_u32,
566 iemAImpl_idiv_u64
567};
568
569/** Function table for the SHLD instruction */
570static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
571{
572 iemAImpl_shld_u16,
573 iemAImpl_shld_u32,
574 iemAImpl_shld_u64,
575};
576
577/** Function table for the SHRD instruction */
578static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
579{
580 iemAImpl_shrd_u16,
581 iemAImpl_shrd_u32,
582 iemAImpl_shrd_u64,
583};
584
585
586/*******************************************************************************
587* Internal Functions *
588*******************************************************************************/
589static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
590/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
591static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
592static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
593static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
594static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
595static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
596static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
597static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
598static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
599static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
600static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
601static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
602static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
603static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
604static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
605static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
606static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
607static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
608static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
609static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
610static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
611static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
612static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
613
614#ifdef IEM_VERIFICATION_MODE
615static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
616#endif
617static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
618static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
619
620
621/**
622 * Initializes the decoder state.
623 *
624 * @param pIemCpu The per CPU IEM state.
625 */
626DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
627{
628 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
629
630 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
631 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
632 ? IEMMODE_64BIT
633 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
634 ? IEMMODE_32BIT
635 : IEMMODE_16BIT;
636 pIemCpu->enmCpuMode = enmMode;
637 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
638 pIemCpu->enmEffAddrMode = enmMode;
639 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
640 pIemCpu->enmEffOpSize = enmMode;
641 pIemCpu->fPrefixes = 0;
642 pIemCpu->uRexReg = 0;
643 pIemCpu->uRexB = 0;
644 pIemCpu->uRexIndex = 0;
645 pIemCpu->iEffSeg = X86_SREG_DS;
646 pIemCpu->offOpcode = 0;
647 pIemCpu->cbOpcode = 0;
648 pIemCpu->cActiveMappings = 0;
649 pIemCpu->iNextMapping = 0;
650}
651
652
653/**
654 * Prefetches opcodes the first time we start executing.
655 *
656 * @returns Strict VBox status code.
657 * @param pIemCpu The IEM state.
658 */
659static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
660{
661#ifdef IEM_VERIFICATION_MODE
662 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
663#endif
664 iemInitDecoder(pIemCpu);
665
666 /*
667 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
668 *
669 * First translate CS:rIP to a physical address.
670 */
671 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
672 uint32_t cbToTryRead;
673 RTGCPTR GCPtrPC;
674 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
675 {
676 cbToTryRead = PAGE_SIZE;
677 GCPtrPC = pCtx->rip;
678 if (!IEM_IS_CANONICAL(GCPtrPC))
679 return iemRaiseGeneralProtectionFault0(pIemCpu);
680 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
681 }
682 else
683 {
684 uint32_t GCPtrPC32 = pCtx->eip;
685 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
686 if (GCPtrPC32 > pCtx->csHid.u32Limit)
687 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
688 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
689 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
690 }
691
692 RTGCPHYS GCPhys;
693 uint64_t fFlags;
694 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
695 if (RT_FAILURE(rc))
696 {
697 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
698 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
699 }
700 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
701 {
702 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
703 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
704 }
705 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
706 {
707 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
708 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
709 }
710 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
711 /** @todo Check reserved bits and such stuff. PGM is better at doing
712 * that, so do it when implementing the guest virtual address
713 * TLB... */
714
715#ifdef IEM_VERIFICATION_MODE
716 /*
717 * Optimistic optimization: Use unconsumed opcode bytes from the previous
718 * instruction.
719 */
720 /** @todo optimize this differently by not using PGMPhysRead. */
721 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
722 pIemCpu->GCPhysOpcodes = GCPhys;
723 if ( offPrevOpcodes < cbOldOpcodes
724 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
725 {
726 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
727 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
728 pIemCpu->cbOpcode = cbNew;
729 return VINF_SUCCESS;
730 }
731#endif
732
733 /*
734 * Read the bytes at this address.
735 */
736 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
737 if (cbToTryRead > cbLeftOnPage)
738 cbToTryRead = cbLeftOnPage;
739 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
740 cbToTryRead = sizeof(pIemCpu->abOpcode);
741 /** @todo patch manager */
742 if (!pIemCpu->fByPassHandlers)
743 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
744 else
745 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
746 if (rc != VINF_SUCCESS)
747 {
748 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc\n", GCPtrPC, rc));
749 return rc;
750 }
751 pIemCpu->cbOpcode = cbToTryRead;
752
753 return VINF_SUCCESS;
754}
755
756
757/**
758 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
759 * exception if it fails.
760 *
761 * @returns Strict VBox status code.
762 * @param pIemCpu The IEM state.
763 * @param cbMin The minimum number of opcode bytes needed.
764 */
765static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
766{
767 /*
768 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
769 *
770 * First translate CS:rIP to a physical address.
771 */
772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
773 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
774 uint32_t cbToTryRead;
775 RTGCPTR GCPtrNext;
776 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
777 {
778 cbToTryRead = PAGE_SIZE;
779 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
780 if (!IEM_IS_CANONICAL(GCPtrNext))
781 return iemRaiseGeneralProtectionFault0(pIemCpu);
782 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
783 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
784 }
785 else
786 {
787 uint32_t GCPtrNext32 = pCtx->eip;
788 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
789 GCPtrNext32 += pIemCpu->cbOpcode;
790 if (GCPtrNext32 > pCtx->csHid.u32Limit)
791 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
792 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
793 if (cbToTryRead < cbMin - cbLeft)
794 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
795 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
796 }
797
798 RTGCPHYS GCPhys;
799 uint64_t fFlags;
800 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
801 if (RT_FAILURE(rc))
802 {
803 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
804 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
805 }
806 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
807 {
808 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
809 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
810 }
811 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
812 {
813 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
814 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
815 }
816 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
817 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
818 /** @todo Check reserved bits and such stuff. PGM is better at doing
819 * that, so do it when implementing the guest virtual address
820 * TLB... */
821
822 /*
823 * Read the bytes at this address.
824 */
825 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
826 if (cbToTryRead > cbLeftOnPage)
827 cbToTryRead = cbLeftOnPage;
828 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
829 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
830 Assert(cbToTryRead >= cbMin - cbLeft);
831 if (!pIemCpu->fByPassHandlers)
832 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
833 else
834 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
835 if (rc != VINF_SUCCESS)
836 {
837 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc\n", GCPtrNext, rc));
838 return rc;
839 }
840 pIemCpu->cbOpcode += cbToTryRead;
841 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
842
843 return VINF_SUCCESS;
844}
845
846
847/**
848 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
849 *
850 * @returns Strict VBox status code.
851 * @param pIemCpu The IEM state.
852 * @param pb Where to return the opcode byte.
853 */
854DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
855{
856 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
857 if (rcStrict == VINF_SUCCESS)
858 {
859 uint8_t offOpcode = pIemCpu->offOpcode;
860 *pb = pIemCpu->abOpcode[offOpcode];
861 pIemCpu->offOpcode = offOpcode + 1;
862 }
863 else
864 *pb = 0;
865 return rcStrict;
866}
867
868
869/**
870 * Fetches the next opcode byte.
871 *
872 * @returns Strict VBox status code.
873 * @param pIemCpu The IEM state.
874 * @param pu8 Where to return the opcode byte.
875 */
876DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
877{
878 uint8_t const offOpcode = pIemCpu->offOpcode;
879 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
880 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
881
882 *pu8 = pIemCpu->abOpcode[offOpcode];
883 pIemCpu->offOpcode = offOpcode + 1;
884 return VINF_SUCCESS;
885}
886
887
888/**
889 * Fetches the next opcode byte, returns automatically on failure.
890 *
891 * @param a_pu8 Where to return the opcode byte.
892 * @remark Implicitly references pIemCpu.
893 */
894#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
895 do \
896 { \
897 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
898 if (rcStrict2 != VINF_SUCCESS) \
899 return rcStrict2; \
900 } while (0)
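/*
 * Illustrative usage (not part of the original file): inside a decoder
 * function the macro above fetches one byte and returns from the caller on
 * any fetch error, so no explicit error plumbing is needed.  The immediate
 * byte scenario is an assumption for the example.
 *
 *     uint8_t u8Imm;
 *     IEM_OPCODE_GET_NEXT_U8(&u8Imm);       // bails out on fetch failure
 *     // ... decode using u8Imm ...
 */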
901
902
903/**
904 * Fetches the next signed byte from the opcode stream.
905 *
906 * @returns Strict VBox status code.
907 * @param pIemCpu The IEM state.
908 * @param pi8 Where to return the signed byte.
909 */
910DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
911{
912 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
913}
914
915
916/**
917 * Fetches the next signed byte from the opcode stream, returning automatically
918 * on failure.
919 *
920 * @param pi8 Where to return the signed byte.
921 * @remark Implicitly references pIemCpu.
922 */
923#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
924 do \
925 { \
926 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
927 if (rcStrict2 != VINF_SUCCESS) \
928 return rcStrict2; \
929 } while (0)
930
931
932/**
933 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
934 *
935 * @returns Strict VBox status code.
936 * @param pIemCpu The IEM state.
937 * @param pu16 Where to return the opcode word.
938 */
939DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
940{
941 uint8_t u8;
942 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
943 if (rcStrict == VINF_SUCCESS)
944 *pu16 = (int8_t)u8;
945 return rcStrict;
946}
947
948
949/**
950 * Fetches the next signed byte from the opcode stream, extending it to
951 * unsigned 16-bit.
952 *
953 * @returns Strict VBox status code.
954 * @param pIemCpu The IEM state.
955 * @param pu16 Where to return the unsigned word.
956 */
957DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
958{
959 uint8_t const offOpcode = pIemCpu->offOpcode;
960 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
961 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
962
963 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
964 pIemCpu->offOpcode = offOpcode + 1;
965 return VINF_SUCCESS;
966}
967
968
969/**
970 * Fetches the next signed byte from the opcode stream, sign-extending it to
971 * a word, returning automatically on failure.
972 *
973 * @param pu16 Where to return the word.
974 * @remark Implicitly references pIemCpu.
975 */
976#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
977 do \
978 { \
979 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
980 if (rcStrict2 != VINF_SUCCESS) \
981 return rcStrict2; \
982 } while (0)
983
984
985/**
986 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
987 *
988 * @returns Strict VBox status code.
989 * @param pIemCpu The IEM state.
990 * @param pu16 Where to return the opcode word.
991 */
992DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
993{
994 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
995 if (rcStrict == VINF_SUCCESS)
996 {
997 uint8_t offOpcode = pIemCpu->offOpcode;
998 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
999 pIemCpu->offOpcode = offOpcode + 2;
1000 }
1001 else
1002 *pu16 = 0;
1003 return rcStrict;
1004}
1005
1006
1007/**
1008 * Fetches the next opcode word.
1009 *
1010 * @returns Strict VBox status code.
1011 * @param pIemCpu The IEM state.
1012 * @param pu16 Where to return the opcode word.
1013 */
1014DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1015{
1016 uint8_t const offOpcode = pIemCpu->offOpcode;
1017 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1018 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1019
1020 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1021 pIemCpu->offOpcode = offOpcode + 2;
1022 return VINF_SUCCESS;
1023}
1024
1025
1026/**
1027 * Fetches the next opcode word, returns automatically on failure.
1028 *
1029 * @param a_pu16 Where to return the opcode word.
1030 * @remark Implicitly references pIemCpu.
1031 */
1032#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1033 do \
1034 { \
1035 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1036 if (rcStrict2 != VINF_SUCCESS) \
1037 return rcStrict2; \
1038 } while (0)
1039
1040
1041/**
1042 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1043 *
1044 * @returns Strict VBox status code.
1045 * @param pIemCpu The IEM state.
1046 * @param pu32 Where to return the opcode double word.
1047 */
1048DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1049{
1050 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1051 if (rcStrict == VINF_SUCCESS)
1052 {
1053 uint8_t offOpcode = pIemCpu->offOpcode;
1054 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1055 pIemCpu->offOpcode = offOpcode + 2;
1056 }
1057 else
1058 *pu32 = 0;
1059 return rcStrict;
1060}
1061
1062
1063/**
1064 * Fetches the next opcode word, zero extending it to a double word.
1065 *
1066 * @returns Strict VBox status code.
1067 * @param pIemCpu The IEM state.
1068 * @param pu32 Where to return the opcode double word.
1069 */
1070DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1071{
1072 uint8_t const offOpcode = pIemCpu->offOpcode;
1073 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1074 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1075
1076 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1077 pIemCpu->offOpcode = offOpcode + 2;
1078 return VINF_SUCCESS;
1079}
1080
1081
1082/**
1083 * Fetches the next opcode word and zero extends it to a double word, returns
1084 * automatically on failure.
1085 *
1086 * @param a_pu32 Where to return the opcode double word.
1087 * @remark Implicitly references pIemCpu.
1088 */
1089#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1090 do \
1091 { \
1092 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1093 if (rcStrict2 != VINF_SUCCESS) \
1094 return rcStrict2; \
1095 } while (0)
1096
1097
1098/**
1099 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1100 *
1101 * @returns Strict VBox status code.
1102 * @param pIemCpu The IEM state.
1103 * @param pu64 Where to return the opcode quad word.
1104 */
1105DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1106{
1107 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1108 if (rcStrict == VINF_SUCCESS)
1109 {
1110 uint8_t offOpcode = pIemCpu->offOpcode;
1111 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1112 pIemCpu->offOpcode = offOpcode + 2;
1113 }
1114 else
1115 *pu64 = 0;
1116 return rcStrict;
1117}
1118
1119
1120/**
1121 * Fetches the next opcode word, zero extending it to a quad word.
1122 *
1123 * @returns Strict VBox status code.
1124 * @param pIemCpu The IEM state.
1125 * @param pu64 Where to return the opcode quad word.
1126 */
1127DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1128{
1129 uint8_t const offOpcode = pIemCpu->offOpcode;
1130 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1131 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1132
1133 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1134 pIemCpu->offOpcode = offOpcode + 2;
1135 return VINF_SUCCESS;
1136}
1137
1138
1139/**
1140 * Fetches the next opcode word and zero extends it to a quad word, returns
1141 * automatically on failure.
1142 *
1143 * @param a_pu64 Where to return the opcode quad word.
1144 * @remark Implicitly references pIemCpu.
1145 */
1146#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1147 do \
1148 { \
1149 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1150 if (rcStrict2 != VINF_SUCCESS) \
1151 return rcStrict2; \
1152 } while (0)
1153
1154
1155/**
1156 * Fetches the next signed word from the opcode stream.
1157 *
1158 * @returns Strict VBox status code.
1159 * @param pIemCpu The IEM state.
1160 * @param pi16 Where to return the signed word.
1161 */
1162DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1163{
1164 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1165}
1166
1167
1168/**
1169 * Fetches the next signed word from the opcode stream, returning automatically
1170 * on failure.
1171 *
1172 * @param pi16 Where to return the signed word.
1173 * @remark Implicitly references pIemCpu.
1174 */
1175#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1176 do \
1177 { \
1178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1179 if (rcStrict2 != VINF_SUCCESS) \
1180 return rcStrict2; \
1181 } while (0)
1182
1183
1184/**
1185 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1186 *
1187 * @returns Strict VBox status code.
1188 * @param pIemCpu The IEM state.
1189 * @param pu32 Where to return the opcode dword.
1190 */
1191DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1192{
1193 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1194 if (rcStrict == VINF_SUCCESS)
1195 {
1196 uint8_t offOpcode = pIemCpu->offOpcode;
1197 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1198 pIemCpu->abOpcode[offOpcode + 1],
1199 pIemCpu->abOpcode[offOpcode + 2],
1200 pIemCpu->abOpcode[offOpcode + 3]);
1201 pIemCpu->offOpcode = offOpcode + 4;
1202 }
1203 else
1204 *pu32 = 0;
1205 return rcStrict;
1206}
1207
1208
1209/**
1210 * Fetches the next opcode dword.
1211 *
1212 * @returns Strict VBox status code.
1213 * @param pIemCpu The IEM state.
1214 * @param pu32 Where to return the opcode double word.
1215 */
1216DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1217{
1218 uint8_t const offOpcode = pIemCpu->offOpcode;
1219 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1220 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1221
1222 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1223 pIemCpu->abOpcode[offOpcode + 1],
1224 pIemCpu->abOpcode[offOpcode + 2],
1225 pIemCpu->abOpcode[offOpcode + 3]);
1226 pIemCpu->offOpcode = offOpcode + 4;
1227 return VINF_SUCCESS;
1228}
1229
1230
1231/**
1232 * Fetches the next opcode dword, returns automatically on failure.
1233 *
1234 * @param a_pu32 Where to return the opcode dword.
1235 * @remark Implicitly references pIemCpu.
1236 */
1237#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1238 do \
1239 { \
1240 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1241 if (rcStrict2 != VINF_SUCCESS) \
1242 return rcStrict2; \
1243 } while (0)
1244
1245
1246/**
1247 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1248 *
1249 * @returns Strict VBox status code.
1250 * @param pIemCpu The IEM state.
1251 * @param pu64 Where to return the opcode quad word.
1252 */
1253DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1254{
1255 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1256 if (rcStrict == VINF_SUCCESS)
1257 {
1258 uint8_t offOpcode = pIemCpu->offOpcode;
1259 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1260 pIemCpu->abOpcode[offOpcode + 1],
1261 pIemCpu->abOpcode[offOpcode + 2],
1262 pIemCpu->abOpcode[offOpcode + 3]);
1263 pIemCpu->offOpcode = offOpcode + 4;
1264 }
1265 else
1266 *pu64 = 0;
1267 return rcStrict;
1268}
1269
1270
1271/**
1272 * Fetches the next opcode dword, zero extending it to a quad word.
1273 *
1274 * @returns Strict VBox status code.
1275 * @param pIemCpu The IEM state.
1276 * @param pu64 Where to return the opcode quad word.
1277 */
1278DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1279{
1280 uint8_t const offOpcode = pIemCpu->offOpcode;
1281 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1282 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1283
1284 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1285 pIemCpu->abOpcode[offOpcode + 1],
1286 pIemCpu->abOpcode[offOpcode + 2],
1287 pIemCpu->abOpcode[offOpcode + 3]);
1288 pIemCpu->offOpcode = offOpcode + 4;
1289 return VINF_SUCCESS;
1290}
1291
1292
1293/**
1294 * Fetches the next opcode dword and zero extends it to a quad word, returns
1295 * automatically on failure.
1296 *
1297 * @param a_pu64 Where to return the opcode quad word.
1298 * @remark Implicitly references pIemCpu.
1299 */
1300#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1301 do \
1302 { \
1303 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1304 if (rcStrict2 != VINF_SUCCESS) \
1305 return rcStrict2; \
1306 } while (0)
1307
1308
1309/**
1310 * Fetches the next signed double word from the opcode stream.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pi32 Where to return the signed double word.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1317{
1318 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1319}
1320
1321/**
1322 * Fetches the next signed double word from the opcode stream, returning
1323 * automatically on failure.
1324 *
1325 * @param pi32 Where to return the signed double word.
1326 * @remark Implicitly references pIemCpu.
1327 */
1328#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1329 do \
1330 { \
1331 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1332 if (rcStrict2 != VINF_SUCCESS) \
1333 return rcStrict2; \
1334 } while (0)
1335
1336
1337/**
1338 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1339 *
1340 * @returns Strict VBox status code.
1341 * @param pIemCpu The IEM state.
1342 * @param pu64 Where to return the opcode qword.
1343 */
1344DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1345{
1346 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1347 if (rcStrict == VINF_SUCCESS)
1348 {
1349 uint8_t offOpcode = pIemCpu->offOpcode;
1350 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1351 pIemCpu->abOpcode[offOpcode + 1],
1352 pIemCpu->abOpcode[offOpcode + 2],
1353 pIemCpu->abOpcode[offOpcode + 3]);
1354 pIemCpu->offOpcode = offOpcode + 4;
1355 }
1356 else
1357 *pu64 = 0;
1358 return rcStrict;
1359}
1360
1361
1362/**
1363 * Fetches the next opcode dword, sign extending it into a quad word.
1364 *
1365 * @returns Strict VBox status code.
1366 * @param pIemCpu The IEM state.
1367 * @param pu64 Where to return the opcode quad word.
1368 */
1369DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1370{
1371 uint8_t const offOpcode = pIemCpu->offOpcode;
1372 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1373 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1374
1375 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1376 pIemCpu->abOpcode[offOpcode + 1],
1377 pIemCpu->abOpcode[offOpcode + 2],
1378 pIemCpu->abOpcode[offOpcode + 3]);
1379 *pu64 = i32;
1380 pIemCpu->offOpcode = offOpcode + 4;
1381 return VINF_SUCCESS;
1382}
1383
1384
1385/**
1386 * Fetches the next opcode double word and sign extends it to a quad word,
1387 * returns automatically on failure.
1388 *
1389 * @param a_pu64 Where to return the opcode quad word.
1390 * @remark Implicitly references pIemCpu.
1391 */
1392#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1393 do \
1394 { \
1395 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1396 if (rcStrict2 != VINF_SUCCESS) \
1397 return rcStrict2; \
1398 } while (0)
1399
1400
1401/**
1402 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1403 *
1404 * @returns Strict VBox status code.
1405 * @param pIemCpu The IEM state.
1406 * @param pu64 Where to return the opcode qword.
1407 */
1408DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1409{
1410 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1411 if (rcStrict == VINF_SUCCESS)
1412 {
1413 uint8_t offOpcode = pIemCpu->offOpcode;
1414 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1415 pIemCpu->abOpcode[offOpcode + 1],
1416 pIemCpu->abOpcode[offOpcode + 2],
1417 pIemCpu->abOpcode[offOpcode + 3],
1418 pIemCpu->abOpcode[offOpcode + 4],
1419 pIemCpu->abOpcode[offOpcode + 5],
1420 pIemCpu->abOpcode[offOpcode + 6],
1421 pIemCpu->abOpcode[offOpcode + 7]);
1422 pIemCpu->offOpcode = offOpcode + 8;
1423 }
1424 else
1425 *pu64 = 0;
1426 return rcStrict;
1427}
1428
1429
1430/**
1431 * Fetches the next opcode qword.
1432 *
1433 * @returns Strict VBox status code.
1434 * @param pIemCpu The IEM state.
1435 * @param pu64 Where to return the opcode qword.
1436 */
1437DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1438{
1439 uint8_t const offOpcode = pIemCpu->offOpcode;
1440 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1441 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1442
1443 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1444 pIemCpu->abOpcode[offOpcode + 1],
1445 pIemCpu->abOpcode[offOpcode + 2],
1446 pIemCpu->abOpcode[offOpcode + 3],
1447 pIemCpu->abOpcode[offOpcode + 4],
1448 pIemCpu->abOpcode[offOpcode + 5],
1449 pIemCpu->abOpcode[offOpcode + 6],
1450 pIemCpu->abOpcode[offOpcode + 7]);
1451 pIemCpu->offOpcode = offOpcode + 8;
1452 return VINF_SUCCESS;
1453}
1454
1455
1456/**
1457 * Fetches the next opcode quad word, returns automatically on failure.
1458 *
1459 * @param a_pu64 Where to return the opcode quad word.
1460 * @remark Implicitly references pIemCpu.
1461 */
1462#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1463 do \
1464 { \
1465 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1466 if (rcStrict2 != VINF_SUCCESS) \
1467 return rcStrict2; \
1468 } while (0)
1469
1470
1471/** @name Misc Worker Functions.
1472 * @{
1473 */
1474
1475
1476/**
1477 * Validates a new SS segment.
1478 *
1479 * @returns VBox strict status code.
1480 * @param pIemCpu The IEM per CPU instance data.
1481 * @param pCtx The CPU context.
1482 * @param NewSS The new SS selector.
1483 * @param uCpl The CPL to load the stack for.
1484 * @param pDesc Where to return the descriptor.
1485 */
1486static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1487{
1488 NOREF(pCtx);
1489
1490 /* Null selectors are not allowed (we're not called for dispatching
1491 interrupts with SS=0 in long mode). */
1492 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1493 {
1494 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1495 return iemRaiseGeneralProtectionFault0(pIemCpu);
1496 }
1497
1498 /*
1499 * Read the descriptor.
1500 */
1501 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1502 if (rcStrict != VINF_SUCCESS)
1503 return rcStrict;
1504
1505 /*
1506 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1507 */
1508 if (!pDesc->Legacy.Gen.u1DescType)
1509 {
1510 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1511 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1512 }
1513
1514 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1515 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1516 {
1517 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1518 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1519 }
1520 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1521 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1522 {
1523 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1524 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1525 }
1526 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1527 if ((NewSS & X86_SEL_RPL) != uCpl)
1528 {
1529 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1530 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1531 }
1532 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1533 {
1534 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1535 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1536 }
1537
1538 /* Is it there? */
1539 /** @todo testcase: Is this checked before the canonical / limit check below? */
1540 if (!pDesc->Legacy.Gen.u1Present)
1541 {
1542 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1543 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1544 }
1545
1546 return VINF_SUCCESS;
1547}
1548
1549
1550/** @} */
1551
1552/** @name Raising Exceptions.
1553 *
1554 * @{
1555 */
1556
1557/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1558 * @{ */
1559/** CPU exception. */
1560#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1561/** External interrupt (from PIC, APIC, whatever). */
1562#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1563/** Software interrupt (int, into or bound). */
1564#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1565/** Takes an error code. */
1566#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1567/** Takes a CR2. */
1568#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1569/** Generated by the breakpoint instruction. */
1570#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1571/** @} */
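/* Illustrative note (not part of the original file): these flags are meant to
 * be combined when raising an event; a page-fault style exception would be
 * described as IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR |
 * IEM_XCPT_FLAGS_CR2, while an INT n instruction uses
 * IEM_XCPT_FLAGS_T_SOFT_INT. */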
1572
1573/**
1574 * Loads the specified stack far pointer from the TSS.
1575 *
1576 * @returns VBox strict status code.
1577 * @param pIemCpu The IEM per CPU instance data.
1578 * @param pCtx The CPU context.
1579 * @param uCpl The CPL to load the stack for.
1580 * @param pSelSS Where to return the new stack segment.
1581 * @param puEsp Where to return the new stack pointer.
1582 */
1583static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1584 PRTSEL pSelSS, uint32_t *puEsp)
1585{
1586 VBOXSTRICTRC rcStrict;
1587 Assert(uCpl < 4);
1588 *puEsp = 0; /* make gcc happy */
1589 *pSelSS = 0; /* make gcc happy */
1590
1591 switch (pCtx->trHid.Attr.n.u4Type)
1592 {
1593 /*
1594 * 16-bit TSS (X86TSS16).
1595 */
1596 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1597 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1598 {
1599 uint32_t off = uCpl * 4 + 2;
1600 if (off + 4 > pCtx->trHid.u32Limit)
1601 {
1602 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
1603 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1604 }
1605
1606 uint32_t u32Tmp = 0; /* gcc maybe... */
1607 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1608 if (rcStrict == VINF_SUCCESS)
1609 {
1610 *puEsp = RT_LOWORD(u32Tmp);
1611 *pSelSS = RT_HIWORD(u32Tmp);
1612 return VINF_SUCCESS;
1613 }
1614 break;
1615 }
1616
1617 /*
1618 * 32-bit TSS (X86TSS32).
1619 */
1620 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1621 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1622 {
1623 uint32_t off = uCpl * 8 + 4;
1624 if (off + 7 > pCtx->trHid.u32Limit)
1625 {
1626 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->trHid.u32Limit));
1627 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1628 }
1629
1630 uint64_t u64Tmp;
1631 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1632 if (rcStrict == VINF_SUCCESS)
1633 {
1634 *puEsp = u64Tmp & UINT32_MAX;
1635 *pSelSS = (RTSEL)(u64Tmp >> 32);
1636 return VINF_SUCCESS;
1637 }
1638 break;
1639 }
1640
1641 default:
1642 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1643 }
1644 return rcStrict;
1645}
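/* Worked example (added note, not in the original source): in a 32-bit TSS the
 * uCpl * 8 + 4 formula above yields offset 4 for uCpl=0, i.e. the ESP0 field,
 * with SS0 taken from the upper half of the 8-byte read; uCpl=1 gives 12 (ESP1)
 * and uCpl=2 gives 20 (ESP2).  For a 16-bit TSS, uCpl * 4 + 2 gives SP0 at
 * offset 2, SP1 at 6 and SP2 at 10, with the SS selector in the high word of
 * the 4-byte read. */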
1646
1647
1648/**
1649 * Adjust the CPU state according to the exception being raised.
1650 *
1651 * @param pCtx The CPU context.
1652 * @param u8Vector The exception that has been raised.
1653 */
1654DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1655{
1656 switch (u8Vector)
1657 {
1658 case X86_XCPT_DB:
1659 pCtx->dr[7] &= ~X86_DR7_GD;
1660 break;
1661 /** @todo Read the AMD and Intel exception reference... */
1662 }
1663}
1664
1665
1666/**
1667 * Implements exceptions and interrupts for real mode.
1668 *
1669 * @returns VBox strict status code.
1670 * @param pIemCpu The IEM per CPU instance data.
1671 * @param pCtx The CPU context.
1672 * @param cbInstr The number of bytes to offset rIP by in the return
1673 * address.
1674 * @param u8Vector The interrupt / exception vector number.
1675 * @param fFlags The flags.
1676 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1677 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1678 */
1679static VBOXSTRICTRC
1680iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1681 PCPUMCTX pCtx,
1682 uint8_t cbInstr,
1683 uint8_t u8Vector,
1684 uint32_t fFlags,
1685 uint16_t uErr,
1686 uint64_t uCr2)
1687{
1688 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1689 NOREF(uErr); NOREF(uCr2);
1690
1691 /*
1692 * Read the IDT entry.
1693 */
1694 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1695 {
1696 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1697 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1698 }
1699 RTFAR16 Idte;
1700 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1701 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1702 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1703 return rcStrict;
1704
1705 /*
1706 * Push the stack frame.
1707 */
1708 uint16_t *pu16Frame;
1709 uint64_t uNewRsp;
1710 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1711 if (rcStrict != VINF_SUCCESS)
1712 return rcStrict;
1713
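    /* Real-mode interrupt frame: FLAGS, CS and the return IP, with the IP
       word ending up at the new top of the stack. */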
1714 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1715 pu16Frame[1] = (uint16_t)pCtx->cs;
1716 pu16Frame[0] = pCtx->ip + cbInstr;
1717 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1718 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1719 return rcStrict;
1720
1721 /*
1722 * Load the vector address into cs:ip and make exception specific state
1723 * adjustments.
1724 */
1725 pCtx->cs = Idte.sel;
1726 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1727 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1728 pCtx->rip = Idte.off;
1729 pCtx->eflags.Bits.u1IF = 0;
1730
1731 /** @todo do we actually do this in real mode? */
1732 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1733 iemRaiseXcptAdjustState(pCtx, u8Vector);
1734
1735 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1736}
1737
1738
1739/**
1740 * Implements exceptions and interrupts for protected mode.
1741 *
1742 * @returns VBox strict status code.
1743 * @param pIemCpu The IEM per CPU instance data.
1744 * @param pCtx The CPU context.
1745 * @param cbInstr The number of bytes to offset rIP by in the return
1746 * address.
1747 * @param u8Vector The interrupt / exception vector number.
1748 * @param fFlags The flags.
1749 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1750 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1751 */
1752static VBOXSTRICTRC
1753iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1754 PCPUMCTX pCtx,
1755 uint8_t cbInstr,
1756 uint8_t u8Vector,
1757 uint32_t fFlags,
1758 uint16_t uErr,
1759 uint64_t uCr2)
1760{
1761 NOREF(cbInstr);
1762
1763 /*
1764 * Read the IDT entry.
1765 */
1766 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1767 {
1768 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1769 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1770 }
1771 X86DESC Idte;
1772 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
1773 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1774 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1775 return rcStrict;
1776 LogFlow(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
1777 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1778 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
1779
1780 /*
1781 * Check the descriptor type, DPL and such.
1782 * ASSUMES this is done in the same order as described for call-gate calls.
1783 */
1784 if (Idte.Gate.u1DescType)
1785 {
1786 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1787 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1788 }
1789 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1790 switch (Idte.Gate.u4Type)
1791 {
1792 case X86_SEL_TYPE_SYS_UNDEFINED:
1793 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1794 case X86_SEL_TYPE_SYS_LDT:
1795 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1796 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1797 case X86_SEL_TYPE_SYS_UNDEFINED2:
1798 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1799 case X86_SEL_TYPE_SYS_UNDEFINED3:
1800 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1801 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1802 case X86_SEL_TYPE_SYS_UNDEFINED4:
1803 {
1804 /** @todo check what actually happens when the type is wrong...
1805 * esp. call gates. */
1806 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1807 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1808 }
1809
1810 case X86_SEL_TYPE_SYS_286_INT_GATE:
1811 case X86_SEL_TYPE_SYS_386_INT_GATE:
1812 fEflToClear |= X86_EFL_IF;
1813 break;
1814
1815 case X86_SEL_TYPE_SYS_TASK_GATE:
1816 /** @todo task gates. */
1817 AssertFailedReturn(VERR_NOT_SUPPORTED);
1818
1819 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1820 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1821 break;
1822
1823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1824 }
1825
1826 /* Check DPL against CPL if applicable. */
1827 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1828 {
1829 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1830 {
1831 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1832 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1833 }
1834 }
1835
1836 /* Is it there? */
1837 if (!Idte.Gate.u1Present)
1838 {
1839 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1840 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1841 }
1842
1843 /* A null CS is bad. */
1844 RTSEL NewCS = Idte.Gate.u16Sel;
1845 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1846 {
1847 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1848 return iemRaiseGeneralProtectionFault0(pIemCpu);
1849 }
1850
1851 /* Fetch the descriptor for the new CS. */
1852 IEMSELDESC DescCS;
1853 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1854 if (rcStrict != VINF_SUCCESS)
1855 {
1856 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1857 return rcStrict;
1858 }
1859
1860 /* Must be a code segment. */
1861 if (!DescCS.Legacy.Gen.u1DescType)
1862 {
1863 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1864 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1865 }
1866 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1867 {
1868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1869 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1870 }
1871
1872 /* Don't allow lowering the privilege level. */
1873 /** @todo Does the lowering of privileges apply to software interrupts
1874 * only? This has bearings on the more-privileged or
1875 * same-privilege stack behavior further down. A testcase would
1876 * be nice. */
1877 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1878 {
1879 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1880 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1881 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1882 }
1883 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1884
1885 /* Check the new EIP against the new CS limit. */
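    /* Note: 286 interrupt/trap gates only carry a 16-bit offset; 386 gates add the high word. */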
1886 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1887 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1888 ? Idte.Gate.u16OffsetLow
1889 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1890 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1891 if (DescCS.Legacy.Gen.u1Granularity)
1892 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1893 if (uNewEip > cbLimitCS)
1894 {
1895        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - new EIP (%#x) is out of bounds (limit=%#x) -> #GP\n",
1896             u8Vector, NewCS, uNewEip, cbLimitCS));
1897 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1898 }
1899
1900 /* Make sure the selector is present. */
1901 if (!DescCS.Legacy.Gen.u1Present)
1902 {
1903 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1904 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1905 }
1906
1907 /*
1908 * If the privilege level changes, we need to get a new stack from the TSS.
1909 * This in turns means validating the new SS and ESP...
1910 */
1911 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1912 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1913 if (uNewCpl != pIemCpu->uCpl)
1914 {
1915 RTSEL NewSS;
1916 uint32_t uNewEsp;
1917 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1918 if (rcStrict != VINF_SUCCESS)
1919 return rcStrict;
1920
1921 IEMSELDESC DescSS;
1922 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925
1926 /* Check that there is sufficient space for the stack frame. */
1927 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1928 if (DescSS.Legacy.Gen.u1Granularity)
1929 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1930 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1931
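        /* Inner-privilege frame: EIP, CS, EFLAGS, ESP and SS (20 bytes), plus
           4 bytes when an error code is pushed. */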
1932 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1933 if ( uNewEsp - 1 > cbLimitSS
1934 || uNewEsp < cbStackFrame)
1935 {
1936 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1937 u8Vector, NewSS, uNewEsp, cbStackFrame));
1938 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1939 }
1940
1941 /*
1942 * Start making changes.
1943 */
1944
1945 /* Create the stack frame. */
1946 RTPTRUNION uStackFrame;
1947 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1948 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
1949 if (rcStrict != VINF_SUCCESS)
1950 return rcStrict;
1951 void * const pvStackFrame = uStackFrame.pv;
1952
1953 if (fFlags & IEM_XCPT_FLAGS_ERR)
1954 *uStackFrame.pu32++ = uErr;
1955 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1956 ? pCtx->eip + cbInstr : pCtx->eip;
1957 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1958 uStackFrame.pu32[2] = pCtx->eflags.u;
1959 uStackFrame.pu32[3] = pCtx->esp;
1960 uStackFrame.pu32[4] = pCtx->ss;
1961 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
1962 if (rcStrict != VINF_SUCCESS)
1963 return rcStrict;
1964
1965 /* Mark the selectors 'accessed' (hope this is the correct time). */
1966        /** @todo testcase: exactly _when_ are the accessed bits set - before or
1967 * after pushing the stack frame? (Write protect the gdt + stack to
1968 * find out.) */
1969 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1970 {
1971 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1972 if (rcStrict != VINF_SUCCESS)
1973 return rcStrict;
1974 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1975 }
1976
1977 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1978 {
1979 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1980 if (rcStrict != VINF_SUCCESS)
1981 return rcStrict;
1982 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1983 }
1984
1985 /*
1986         * Start committing the register changes (joins with the DPL=CPL branch).
1987 */
1988 pCtx->ss = NewSS;
1989 pCtx->ssHid.u32Limit = cbLimitSS;
1990 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
1991 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1992 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1993 pIemCpu->uCpl = uNewCpl;
1994 }
1995 /*
1996 * Same privilege, no stack change and smaller stack frame.
1997 */
1998 else
1999 {
2000 uint64_t uNewRsp;
2001 RTPTRUNION uStackFrame;
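        /* Same-privilege frame: EIP, CS and EFLAGS (12 bytes), plus 4 bytes
           when an error code is pushed. */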
2002 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2003 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2004 if (rcStrict != VINF_SUCCESS)
2005 return rcStrict;
2006 void * const pvStackFrame = uStackFrame.pv;
2007
2008 if (fFlags & IEM_XCPT_FLAGS_ERR)
2009 *uStackFrame.pu32++ = uErr;
2010 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2011 ? pCtx->eip + cbInstr : pCtx->eip;
2012 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
2013 uStackFrame.pu32[2] = pCtx->eflags.u;
2014 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2015 if (rcStrict != VINF_SUCCESS)
2016 return rcStrict;
2017
2018 /* Mark the CS selector as 'accessed'. */
2019 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2020 {
2021 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2022 if (rcStrict != VINF_SUCCESS)
2023 return rcStrict;
2024 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2025 }
2026
2027 /*
2028 * Start committing the register changes (joins with the other branch).
2029 */
2030 pCtx->rsp = uNewRsp;
2031 }
2032
2033 /* ... register committing continues. */
2034 pCtx->cs = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2035 pCtx->csHid.u32Limit = cbLimitCS;
2036 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2037 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2038
2039 pCtx->rip = uNewEip;
2040 pCtx->rflags.u &= ~fEflToClear;
2041
2042 if (fFlags & IEM_XCPT_FLAGS_CR2)
2043 pCtx->cr2 = uCr2;
2044
2045 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2046 iemRaiseXcptAdjustState(pCtx, u8Vector);
2047
2048 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2049}
2050
2051
2052/**
2053 * Implements exceptions and interrupts for V8086 mode.
2054 *
2055 * @returns VBox strict status code.
2056 * @param pIemCpu The IEM per CPU instance data.
2057 * @param pCtx The CPU context.
2058 * @param cbInstr The number of bytes to offset rIP by in the return
2059 * address.
2060 * @param u8Vector The interrupt / exception vector number.
2061 * @param fFlags The flags.
2062 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2063 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2064 */
2065static VBOXSTRICTRC
2066iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2067 PCPUMCTX pCtx,
2068 uint8_t cbInstr,
2069 uint8_t u8Vector,
2070 uint32_t fFlags,
2071 uint16_t uErr,
2072 uint64_t uCr2)
2073{
2074 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2075 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
2076 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2077}
2078
2079
2080/**
2081 * Implements exceptions and interrupts for long mode.
2082 *
2083 * @returns VBox strict status code.
2084 * @param pIemCpu The IEM per CPU instance data.
2085 * @param pCtx The CPU context.
2086 * @param cbInstr The number of bytes to offset rIP by in the return
2087 * address.
2088 * @param u8Vector The interrupt / exception vector number.
2089 * @param fFlags The flags.
2090 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2091 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2092 */
2093static VBOXSTRICTRC
2094iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2095 PCPUMCTX pCtx,
2096 uint8_t cbInstr,
2097 uint8_t u8Vector,
2098 uint32_t fFlags,
2099 uint16_t uErr,
2100 uint64_t uCr2)
2101{
2102 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2103 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
2104 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2105}
2106
2107
2108/**
2109 * Implements exceptions and interrupts.
2110 *
2111 * All exceptions and interrupts go through this function!
2112 *
2113 * @returns VBox strict status code.
2114 * @param pIemCpu The IEM per CPU instance data.
2115 * @param cbInstr The number of bytes to offset rIP by in the return
2116 * address.
2117 * @param u8Vector The interrupt / exception vector number.
2118 * @param fFlags The flags.
2119 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2120 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2121 */
2122DECL_NO_INLINE(static, VBOXSTRICTRC)
2123iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2124 uint8_t cbInstr,
2125 uint8_t u8Vector,
2126 uint32_t fFlags,
2127 uint16_t uErr,
2128 uint64_t uCr2)
2129{
2130 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2131
2132 /*
2133 * Do recursion accounting.
2134 */
2135 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2136 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2137 if (pIemCpu->cXcptRecursions == 0)
2138 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2139 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2140 else
2141 {
2142 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2143 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2144
2145        /** @todo double and triple faults. */
2146 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
2147
2148 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2149 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2150 {
2151 ....
2152 } */
2153 }
2154 pIemCpu->cXcptRecursions++;
2155 pIemCpu->uCurXcpt = u8Vector;
2156 pIemCpu->fCurXcpt = fFlags;
2157
2158 /*
2159 * Extensive logging.
2160 */
2161#ifdef LOG_ENABLED
2162 if (LogIs3Enabled())
2163 {
2164 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2165 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2166 char szRegs[4096];
2167 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2168 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2169 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2170 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2171 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2172 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2173 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2174 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2175 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2176 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2177 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2178 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2179 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2180 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2181 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2182 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2183 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2184 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2185 " efer=%016VR{efer}\n"
2186 " pat=%016VR{pat}\n"
2187 " sf_mask=%016VR{sf_mask}\n"
2188 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2189 " lstar=%016VR{lstar}\n"
2190 " star=%016VR{star} cstar=%016VR{cstar}\n"
2191 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2192 );
2193
2194 char szInstr[256];
2195 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2196 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2197 szInstr, sizeof(szInstr), NULL);
2198 Log3(("%s%s\n", szRegs, szInstr));
2199 }
2200#endif /* LOG_ENABLED */
2201
2202 /*
2203 * Call the mode specific worker function.
2204 */
2205 VBOXSTRICTRC rcStrict;
2206 if (!(pCtx->cr0 & X86_CR0_PE))
2207 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2208 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2209 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2210 else if (!pCtx->eflags.Bits.u1VM)
2211 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2212 else
2213 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2214
2215 /*
2216 * Unwind.
2217 */
2218 pIemCpu->cXcptRecursions--;
2219 pIemCpu->uCurXcpt = uPrevXcpt;
2220 pIemCpu->fCurXcpt = fPrevXcpt;
2221 LogFlow(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
2222 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
2223 return rcStrict;
2224}
2225
2226
2227/** \#DE - 00. */
2228DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2229{
2230 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2231}
2232
2233
2234/** \#DB - 01. */
2235DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2236{
2237 /** @todo set/clear RF. */
2238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2239}
2240
2241
2242/** \#UD - 06. */
2243DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2244{
2245 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2246}
2247
2248
2249/** \#NM - 07. */
2250DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2251{
2252 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2253}
2254
2255
2256#ifdef SOME_UNUSED_FUNCTION
2257/** \#TS(err) - 0a. */
2258DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2259{
2260 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2261}
2262#endif
2263
2264
2265/** \#TS(tr) - 0a. */
2266DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2267{
2268 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2269 pIemCpu->CTX_SUFF(pCtx)->tr, 0);
2270}
2271
2272
2273/** \#NP(err) - 0b. */
2274DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2275{
2276 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2277}
2278
2279
2280/** \#NP(seg) - 0b. */
2281DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2282{
2283 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2284 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2285}
2286
2287
2288/** \#NP(sel) - 0b. */
2289DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2290{
2291 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2292 uSel & ~X86_SEL_RPL, 0);
2293}
2294
2295
2296/** \#SS(seg) - 0c. */
2297DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2298{
2299 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2300 uSel & ~X86_SEL_RPL, 0);
2301}
2302
2303
2304/** \#GP(n) - 0d. */
2305DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2306{
2307 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2308}
2309
2310
2311/** \#GP(0) - 0d. */
2312DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2313{
2314 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2315}
2316
2317
2318/** \#GP(sel) - 0d. */
2319DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2320{
2321 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2322 Sel & ~X86_SEL_RPL, 0);
2323}
2324
2325
2326/** \#GP(0) - 0d. */
2327DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2328{
2329 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2330}
2331
2332
2333/** \#GP(sel) - 0d. */
2334DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2335{
2336 NOREF(iSegReg); NOREF(fAccess);
2337 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2338 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2339}
2340
2341
2342/** \#GP(sel) - 0d. */
2343DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2344{
2345 NOREF(Sel);
2346 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2347}
2348
2349
2350/** \#GP(sel) - 0d. */
2351DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2352{
2353 NOREF(iSegReg); NOREF(fAccess);
2354 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2355}
2356
2357
2358/** \#PF(n) - 0e. */
2359DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2360{
2361 uint16_t uErr;
2362 switch (rc)
2363 {
2364 case VERR_PAGE_NOT_PRESENT:
2365 case VERR_PAGE_TABLE_NOT_PRESENT:
2366 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2367 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2368 uErr = 0;
2369 break;
2370
2371 default:
2372 AssertMsgFailed(("%Rrc\n", rc));
2373 case VERR_ACCESS_DENIED:
2374 uErr = X86_TRAP_PF_P;
2375 break;
2376
2377 /** @todo reserved */
2378 }
2379
2380 if (pIemCpu->uCpl == 3)
2381 uErr |= X86_TRAP_PF_US;
2382
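    /* Only report the instruction-fetch (I/D) bit when both CR4.PAE and
       EFER.NXE are set. */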
2383 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2384 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2385 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2386 uErr |= X86_TRAP_PF_ID;
2387
2388    /* Note! RW access callers reporting a WRITE protection fault will clear
2389 the READ flag before calling. So, read-modify-write accesses (RW)
2390 can safely be reported as READ faults. */
2391 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2392 uErr |= X86_TRAP_PF_RW;
2393
2394 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2395 uErr, GCPtrWhere);
2396}
2397
2398
2399/** \#MF(0) - 10. */
2400DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2401{
2402 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2403}
2404
2405
2406/** \#AC(0) - 11. */
2407DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2408{
2409 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2410}
2411
2412
2413/**
2414 * Macro for calling iemCImplRaiseDivideError().
2415 *
2416 * This enables us to add/remove arguments and force different levels of
2417 * inlining as we wish.
2418 *
2419 * @return Strict VBox status code.
2420 */
2421#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2422IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2423{
2424 NOREF(cbInstr);
2425 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2426}
2427
2428
2429/**
2430 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2431 *
2432 * This enables us to add/remove arguments and force different levels of
2433 * inlining as we wish.
2434 *
2435 * @return Strict VBox status code.
2436 */
2437#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2438IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2439{
2440 NOREF(cbInstr);
2441 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2442}
2443
2444
2445/**
2446 * Macro for calling iemCImplRaiseInvalidOpcode().
2447 *
2448 * This enables us to add/remove arguments and force different levels of
2449 * inlining as we wish.
2450 *
2451 * @return Strict VBox status code.
2452 */
2453#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2454IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2455{
2456 NOREF(cbInstr);
2457 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2458}
2459
2460
2461/** @} */
2462
2463
2464/*
2465 *
2466 * Helper routines.
2467 * Helper routines.
2468 * Helper routines.
2469 *
2470 */
2471
2472/**
2473 * Recalculates the effective operand size.
2474 *
2475 * @param pIemCpu The IEM state.
2476 */
2477static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2478{
2479 switch (pIemCpu->enmCpuMode)
2480 {
2481 case IEMMODE_16BIT:
2482 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2483 break;
2484 case IEMMODE_32BIT:
2485 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2486 break;
2487 case IEMMODE_64BIT:
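            /* REX.W takes precedence over the operand-size prefix (0x66);
               with neither present the default operand size applies. */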
2488 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2489 {
2490 case 0:
2491 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2492 break;
2493 case IEM_OP_PRF_SIZE_OP:
2494 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2495 break;
2496 case IEM_OP_PRF_SIZE_REX_W:
2497 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2498 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2499 break;
2500 }
2501 break;
2502 default:
2503 AssertFailed();
2504 }
2505}
2506
2507
2508/**
2509 * Sets the default operand size to 64-bit and recalculates the effective
2510 * operand size.
2511 *
2512 * @param pIemCpu The IEM state.
2513 */
2514static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2515{
2516 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2517 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2518 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2519 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2520 else
2521 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2522}
2523
2524
2525/*
2526 *
2527 * Common opcode decoders.
2528 * Common opcode decoders.
2529 * Common opcode decoders.
2530 *
2531 */
2532#include <iprt/mem.h>
2533
2534/**
2535 * Used to add extra details about a stub case.
2536 * @param pIemCpu The IEM per CPU state.
2537 */
2538static void iemOpStubMsg2(PIEMCPU pIemCpu)
2539{
2540 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2541 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2542 char szRegs[4096];
2543 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2544 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2545 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2546 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2547 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2548 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2549 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2550 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2551 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2552 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2553 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2554 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2555 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2556 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2557 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2558 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2559 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2560 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2561 " efer=%016VR{efer}\n"
2562 " pat=%016VR{pat}\n"
2563 " sf_mask=%016VR{sf_mask}\n"
2564 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2565 " lstar=%016VR{lstar}\n"
2566 " star=%016VR{star} cstar=%016VR{cstar}\n"
2567 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2568 );
2569
2570 char szInstr[256];
2571 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2572 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2573 szInstr, sizeof(szInstr), NULL);
2574
2575 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2576}
2577
2578
2579/** Stubs an opcode. */
2580#define FNIEMOP_STUB(a_Name) \
2581 FNIEMOP_DEF(a_Name) \
2582 { \
2583 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2584 iemOpStubMsg2(pIemCpu); \
2585 RTAssertPanic(); \
2586 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2587 } \
2588 typedef int ignore_semicolon
2589
2590/** Stubs an opcode. */
2591#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2592 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2593 { \
2594 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2595 iemOpStubMsg2(pIemCpu); \
2596 RTAssertPanic(); \
2597 NOREF(a_Name0); \
2598 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2599 } \
2600 typedef int ignore_semicolon
2601
2602/** Stubs an opcode which currently should raise \#UD. */
2603#define FNIEMOP_UD_STUB(a_Name) \
2604 FNIEMOP_DEF(a_Name) \
2605 { \
2606 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2607 return IEMOP_RAISE_INVALID_OPCODE(); \
2608 } \
2609 typedef int ignore_semicolon
2610
2611/** Stubs an opcode which currently should raise \#UD. */
2612#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2613 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2614 { \
2615 NOREF(a_Name0); \
2616 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2617 return IEMOP_RAISE_INVALID_OPCODE(); \
2618 } \
2619 typedef int ignore_semicolon
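/* Illustrative only (the opcode name below is made up): an instruction that
 * is not implemented yet would typically be declared with one of the stubs
 * above, e.g.
 *      FNIEMOP_STUB(iemOp_NotImplementedYet);
 * which asserts, dumps state via iemOpStubMsg2 and returns
 * VERR_IEM_INSTR_NOT_IMPLEMENTED when executed.
 */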
2620
2621
2622
2623/** @name Register Access.
2624 * @{
2625 */
2626
2627/**
2628 * Gets a reference (pointer) to the specified hidden segment register.
2629 *
2630 * @returns Hidden register reference.
2631 * @param pIemCpu The per CPU data.
2632 * @param iSegReg The segment register.
2633 */
2634static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2635{
2636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2637 switch (iSegReg)
2638 {
2639 case X86_SREG_ES: return &pCtx->esHid;
2640 case X86_SREG_CS: return &pCtx->csHid;
2641 case X86_SREG_SS: return &pCtx->ssHid;
2642 case X86_SREG_DS: return &pCtx->dsHid;
2643 case X86_SREG_FS: return &pCtx->fsHid;
2644 case X86_SREG_GS: return &pCtx->gsHid;
2645 }
2646 AssertFailedReturn(NULL);
2647}
2648
2649
2650/**
2651 * Gets a reference (pointer) to the specified segment register (the selector
2652 * value).
2653 *
2654 * @returns Pointer to the selector variable.
2655 * @param pIemCpu The per CPU data.
2656 * @param iSegReg The segment register.
2657 */
2658static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2659{
2660 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2661 switch (iSegReg)
2662 {
2663 case X86_SREG_ES: return &pCtx->es;
2664 case X86_SREG_CS: return &pCtx->cs;
2665 case X86_SREG_SS: return &pCtx->ss;
2666 case X86_SREG_DS: return &pCtx->ds;
2667 case X86_SREG_FS: return &pCtx->fs;
2668 case X86_SREG_GS: return &pCtx->gs;
2669 }
2670 AssertFailedReturn(NULL);
2671}
2672
2673
2674/**
2675 * Fetches the selector value of a segment register.
2676 *
2677 * @returns The selector value.
2678 * @param pIemCpu The per CPU data.
2679 * @param iSegReg The segment register.
2680 */
2681static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2682{
2683 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2684 switch (iSegReg)
2685 {
2686 case X86_SREG_ES: return pCtx->es;
2687 case X86_SREG_CS: return pCtx->cs;
2688 case X86_SREG_SS: return pCtx->ss;
2689 case X86_SREG_DS: return pCtx->ds;
2690 case X86_SREG_FS: return pCtx->fs;
2691 case X86_SREG_GS: return pCtx->gs;
2692 }
2693 AssertFailedReturn(0xffff);
2694}
2695
2696
2697/**
2698 * Gets a reference (pointer) to the specified general register.
2699 *
2700 * @returns Register reference.
2701 * @param pIemCpu The per CPU data.
2702 * @param iReg The general register.
2703 */
2704static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2705{
2706 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2707 switch (iReg)
2708 {
2709 case X86_GREG_xAX: return &pCtx->rax;
2710 case X86_GREG_xCX: return &pCtx->rcx;
2711 case X86_GREG_xDX: return &pCtx->rdx;
2712 case X86_GREG_xBX: return &pCtx->rbx;
2713 case X86_GREG_xSP: return &pCtx->rsp;
2714 case X86_GREG_xBP: return &pCtx->rbp;
2715 case X86_GREG_xSI: return &pCtx->rsi;
2716 case X86_GREG_xDI: return &pCtx->rdi;
2717 case X86_GREG_x8: return &pCtx->r8;
2718 case X86_GREG_x9: return &pCtx->r9;
2719 case X86_GREG_x10: return &pCtx->r10;
2720 case X86_GREG_x11: return &pCtx->r11;
2721 case X86_GREG_x12: return &pCtx->r12;
2722 case X86_GREG_x13: return &pCtx->r13;
2723 case X86_GREG_x14: return &pCtx->r14;
2724 case X86_GREG_x15: return &pCtx->r15;
2725 }
2726 AssertFailedReturn(NULL);
2727}
2728
2729
2730/**
2731 * Gets a reference (pointer) to the specified 8-bit general register.
2732 *
2733 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2734 *
2735 * @returns Register reference.
2736 * @param pIemCpu The per CPU data.
2737 * @param iReg The register.
2738 */
2739static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2740{
2741 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2742 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2743
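    /* Without a REX prefix, encodings 4-7 select AH, CH, DH and BH, i.e. the
       second byte of rAX..rBX, hence masking with 3 and bumping the byte
       pointer by one (little-endian host layout). */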
2744 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2745 if (iReg >= 4)
2746 pu8Reg++;
2747 return pu8Reg;
2748}
2749
2750
2751/**
2752 * Fetches the value of an 8-bit general register.
2753 *
2754 * @returns The register value.
2755 * @param pIemCpu The per CPU data.
2756 * @param iReg The register.
2757 */
2758static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2759{
2760 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2761 return *pbSrc;
2762}
2763
2764
2765/**
2766 * Fetches the value of a 16-bit general register.
2767 *
2768 * @returns The register value.
2769 * @param pIemCpu The per CPU data.
2770 * @param iReg The register.
2771 */
2772static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2773{
2774 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2775}
2776
2777
2778/**
2779 * Fetches the value of a 32-bit general register.
2780 *
2781 * @returns The register value.
2782 * @param pIemCpu The per CPU data.
2783 * @param iReg The register.
2784 */
2785static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2786{
2787 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2788}
2789
2790
2791/**
2792 * Fetches the value of a 64-bit general register.
2793 *
2794 * @returns The register value.
2795 * @param pIemCpu The per CPU data.
2796 * @param iReg The register.
2797 */
2798static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2799{
2800 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2801}
2802
2803
2804/**
2805 * Checks whether the FPU state is in FXSAVE format.
2806 *
2807 * @returns true if it is, false if it's in FNSAVE format.
2808 * @param   pIemCpu             The IEM per CPU data.
2809 */
2810DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2811{
2812#ifdef RT_ARCH_AMD64
2813 NOREF(pIemCpu);
2814 return true;
2815#else
2816 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2817 return true;
2818#endif
2819}
2820
2821
2822/**
2823 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2824 *
2825 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2826 * segment limit.
2827 *
2828 * @param pIemCpu The per CPU data.
2829 * @param offNextInstr The offset of the next instruction.
2830 */
2831static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2832{
2833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2834 switch (pIemCpu->enmEffOpSize)
2835 {
2836 case IEMMODE_16BIT:
2837 {
2838 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2839 if ( uNewIp > pCtx->csHid.u32Limit
2840 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2841 return iemRaiseGeneralProtectionFault0(pIemCpu);
2842 pCtx->rip = uNewIp;
2843 break;
2844 }
2845
2846 case IEMMODE_32BIT:
2847 {
2848 Assert(pCtx->rip <= UINT32_MAX);
2849 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2850
2851 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2852 if (uNewEip > pCtx->csHid.u32Limit)
2853 return iemRaiseGeneralProtectionFault0(pIemCpu);
2854 pCtx->rip = uNewEip;
2855 break;
2856 }
2857
2858 case IEMMODE_64BIT:
2859 {
2860 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2861
2862 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2863 if (!IEM_IS_CANONICAL(uNewRip))
2864 return iemRaiseGeneralProtectionFault0(pIemCpu);
2865 pCtx->rip = uNewRip;
2866 break;
2867 }
2868
2869 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2870 }
2871
2872 return VINF_SUCCESS;
2873}
2874
2875
2876/**
2877 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2878 *
2879 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2880 * segment limit.
2881 *
2882 * @returns Strict VBox status code.
2883 * @param pIemCpu The per CPU data.
2884 * @param offNextInstr The offset of the next instruction.
2885 */
2886static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2887{
2888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2889 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2890
2891 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2892 if ( uNewIp > pCtx->csHid.u32Limit
2893 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2894 return iemRaiseGeneralProtectionFault0(pIemCpu);
2895 /** @todo Test 16-bit jump in 64-bit mode. */
2896 pCtx->rip = uNewIp;
2897
2898 return VINF_SUCCESS;
2899}
2900
2901
2902/**
2903 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2904 *
2905 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2906 * segment limit.
2907 *
2908 * @returns Strict VBox status code.
2909 * @param pIemCpu The per CPU data.
2910 * @param offNextInstr The offset of the next instruction.
2911 */
2912static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2913{
2914 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2915 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2916
2917 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2918 {
2919 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2920
2921 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2922 if (uNewEip > pCtx->csHid.u32Limit)
2923 return iemRaiseGeneralProtectionFault0(pIemCpu);
2924 pCtx->rip = uNewEip;
2925 }
2926 else
2927 {
2928 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2929
2930 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2931 if (!IEM_IS_CANONICAL(uNewRip))
2932 return iemRaiseGeneralProtectionFault0(pIemCpu);
2933 pCtx->rip = uNewRip;
2934 }
2935 return VINF_SUCCESS;
2936}
2937
2938
2939/**
2940 * Performs a near jump to the specified address.
2941 *
2942 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2943 * segment limit.
2944 *
2945 * @param pIemCpu The per CPU data.
2946 * @param uNewRip The new RIP value.
2947 */
2948static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2949{
2950 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2951 switch (pIemCpu->enmEffOpSize)
2952 {
2953 case IEMMODE_16BIT:
2954 {
2955 Assert(uNewRip <= UINT16_MAX);
2956 if ( uNewRip > pCtx->csHid.u32Limit
2957 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2958 return iemRaiseGeneralProtectionFault0(pIemCpu);
2959 /** @todo Test 16-bit jump in 64-bit mode. */
2960 pCtx->rip = uNewRip;
2961 break;
2962 }
2963
2964 case IEMMODE_32BIT:
2965 {
2966 Assert(uNewRip <= UINT32_MAX);
2967 Assert(pCtx->rip <= UINT32_MAX);
2968 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2969
2970 if (uNewRip > pCtx->csHid.u32Limit)
2971 return iemRaiseGeneralProtectionFault0(pIemCpu);
2972 pCtx->rip = uNewRip;
2973 break;
2974 }
2975
2976 case IEMMODE_64BIT:
2977 {
2978 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2979
2980 if (!IEM_IS_CANONICAL(uNewRip))
2981 return iemRaiseGeneralProtectionFault0(pIemCpu);
2982 pCtx->rip = uNewRip;
2983 break;
2984 }
2985
2986 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2987 }
2988
2989 return VINF_SUCCESS;
2990}
2991
2992
2993/**
2994 * Gets the address of the top of the stack.
2995 *
2996 * @param pCtx The CPU context which SP/ESP/RSP should be
2997 * read.
2998 */
2999DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3000{
3001 if (pCtx->ssHid.Attr.n.u1Long)
3002 return pCtx->rsp;
3003 if (pCtx->ssHid.Attr.n.u1DefBig)
3004 return pCtx->esp;
3005 return pCtx->sp;
3006}
3007
3008
3009/**
3010 * Updates the RIP/EIP/IP to point to the next instruction.
3011 *
3012 * @param pIemCpu The per CPU data.
3013 * @param cbInstr The number of bytes to add.
3014 */
3015static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3016{
3017 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3018 switch (pIemCpu->enmCpuMode)
3019 {
3020 case IEMMODE_16BIT:
3021 Assert(pCtx->rip <= UINT16_MAX);
3022 pCtx->eip += cbInstr;
3023 pCtx->eip &= UINT32_C(0xffff);
3024 break;
3025
3026 case IEMMODE_32BIT:
3027 pCtx->eip += cbInstr;
3028 Assert(pCtx->rip <= UINT32_MAX);
3029 break;
3030
3031 case IEMMODE_64BIT:
3032 pCtx->rip += cbInstr;
3033 break;
3034 default: AssertFailed();
3035 }
3036}
3037
3038
3039/**
3040 * Updates the RIP/EIP/IP to point to the next instruction.
3041 *
3042 * @param pIemCpu The per CPU data.
3043 */
3044static void iemRegUpdateRip(PIEMCPU pIemCpu)
3045{
3046 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3047}
3048
3049
3050/**
3051 * Adds to the stack pointer.
3052 *
3053 * @param pCtx The CPU context which SP/ESP/RSP should be
3054 * updated.
3055 * @param cbToAdd The number of bytes to add.
3056 */
3057DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3058{
3059 if (pCtx->ssHid.Attr.n.u1Long)
3060 pCtx->rsp += cbToAdd;
3061 else if (pCtx->ssHid.Attr.n.u1DefBig)
3062 pCtx->esp += cbToAdd;
3063 else
3064 pCtx->sp += cbToAdd;
3065}
3066
3067
3068/**
3069 * Subtracts from the stack pointer.
3070 *
3071 * @param pCtx The CPU context which SP/ESP/RSP should be
3072 * updated.
3073 * @param cbToSub The number of bytes to subtract.
3074 */
3075DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3076{
3077 if (pCtx->ssHid.Attr.n.u1Long)
3078 pCtx->rsp -= cbToSub;
3079 else if (pCtx->ssHid.Attr.n.u1DefBig)
3080 pCtx->esp -= cbToSub;
3081 else
3082 pCtx->sp -= cbToSub;
3083}
3084
3085
3086/**
3087 * Adds to the temporary stack pointer.
3088 *
3089 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3090 * @param cbToAdd The number of bytes to add.
3091 * @param pCtx Where to get the current stack mode.
3092 */
3093DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
3094{
3095 if (pCtx->ssHid.Attr.n.u1Long)
3096 pTmpRsp->u += cbToAdd;
3097 else if (pCtx->ssHid.Attr.n.u1DefBig)
3098 pTmpRsp->DWords.dw0 += cbToAdd;
3099 else
3100 pTmpRsp->Words.w0 += cbToAdd;
3101}
3102
3103
3104/**
3105 * Subtracts from the temporary stack pointer.
3106 *
3107 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3108 * @param cbToSub The number of bytes to subtract.
3109 * @param pCtx Where to get the current stack mode.
3110 */
3111DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
3112{
3113 if (pCtx->ssHid.Attr.n.u1Long)
3114 pTmpRsp->u -= cbToSub;
3115 else if (pCtx->ssHid.Attr.n.u1DefBig)
3116 pTmpRsp->DWords.dw0 -= cbToSub;
3117 else
3118 pTmpRsp->Words.w0 -= cbToSub;
3119}
3120
3121
3122/**
3123 * Calculates the effective stack address for a push of the specified size as
3124 * well as the new RSP value (upper bits may be masked).
3125 *
3126 * @returns Effective stack address for the push.
3127 * @param   pCtx                Where to get the current stack mode.
3128 * @param   cbItem              The size of the stack item to push.
3129 * @param puNewRsp Where to return the new RSP value.
3130 */
3131DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3132{
3133 RTUINT64U uTmpRsp;
3134 RTGCPTR GCPtrTop;
3135 uTmpRsp.u = pCtx->rsp;
3136
3137 if (pCtx->ssHid.Attr.n.u1Long)
3138 GCPtrTop = uTmpRsp.u -= cbItem;
3139 else if (pCtx->ssHid.Attr.n.u1DefBig)
3140 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3141 else
3142 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3143 *puNewRsp = uTmpRsp.u;
3144 return GCPtrTop;
3145}
3146
3147
3148/**
3149 * Gets the current stack pointer and calculates the value after a pop of the
3150 * specified size.
3151 *
3152 * @returns Current stack pointer.
3153 * @param pCtx Where to get the current stack mode.
3154 * @param cbItem The size of the stack item to pop.
3155 * @param puNewRsp Where to return the new RSP value.
3156 */
3157DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3158{
3159 RTUINT64U uTmpRsp;
3160 RTGCPTR GCPtrTop;
3161 uTmpRsp.u = pCtx->rsp;
3162
3163 if (pCtx->ssHid.Attr.n.u1Long)
3164 {
3165 GCPtrTop = uTmpRsp.u;
3166 uTmpRsp.u += cbItem;
3167 }
3168 else if (pCtx->ssHid.Attr.n.u1DefBig)
3169 {
3170 GCPtrTop = uTmpRsp.DWords.dw0;
3171 uTmpRsp.DWords.dw0 += cbItem;
3172 }
3173 else
3174 {
3175 GCPtrTop = uTmpRsp.Words.w0;
3176 uTmpRsp.Words.w0 += cbItem;
3177 }
3178 *puNewRsp = uTmpRsp.u;
3179 return GCPtrTop;
3180}
3181
3182
3183/**
3184 * Calculates the effective stack address for a push of the specified size as
3185 * well as the new temporary RSP value (upper bits may be masked).
3186 *
3187 * @returns Effective stack address for the push.
3188 * @param   pTmpRsp             The temporary stack pointer.  This is updated.
3189 * @param   cbItem              The size of the stack item to push.
3190 * @param   pCtx                Where to get the current stack mode.
3191 */
3192DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3193{
3194 RTGCPTR GCPtrTop;
3195
3196 if (pCtx->ssHid.Attr.n.u1Long)
3197 GCPtrTop = pTmpRsp->u -= cbItem;
3198 else if (pCtx->ssHid.Attr.n.u1DefBig)
3199 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3200 else
3201 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3202 return GCPtrTop;
3203}
3204
3205
3206/**
3207 * Gets the effective stack address for a pop of the specified size and
3208 * calculates and updates the temporary RSP.
3209 *
3210 * @returns Current stack pointer.
3211 * @param pTmpRsp The temporary stack pointer. This is updated.
3212 * @param pCtx Where to get the current stack mode.
3213 * @param cbItem The size of the stack item to pop.
3214 */
3215DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3216{
3217 RTGCPTR GCPtrTop;
3218 if (pCtx->ssHid.Attr.n.u1Long)
3219 {
3220 GCPtrTop = pTmpRsp->u;
3221 pTmpRsp->u += cbItem;
3222 }
3223 else if (pCtx->ssHid.Attr.n.u1DefBig)
3224 {
3225 GCPtrTop = pTmpRsp->DWords.dw0;
3226 pTmpRsp->DWords.dw0 += cbItem;
3227 }
3228 else
3229 {
3230 GCPtrTop = pTmpRsp->Words.w0;
3231 pTmpRsp->Words.w0 += cbItem;
3232 }
3233 return GCPtrTop;
3234}
3235
3236
3237/**
3238 * Checks if an Intel CPUID feature bit is set.
3239 *
3240 * @returns true / false.
3241 *
3242 * @param pIemCpu The IEM per CPU data.
3243 * @param fEdx The EDX bit to test, or 0 if ECX.
3244 * @param fEcx The ECX bit to test, or 0 if EDX.
3245 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3246 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3247 */
3248static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3249{
3250 uint32_t uEax, uEbx, uEcx, uEdx;
3251 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3252 return (fEcx && (uEcx & fEcx))
3253 || (fEdx && (uEdx & fEdx));
3254}
3255
3256
3257/**
3258 * Checks if an AMD CPUID feature bit is set.
3259 *
3260 * @returns true / false.
3261 *
3262 * @param pIemCpu The IEM per CPU data.
3263 * @param fEdx The EDX bit to test, or 0 if ECX.
3264 * @param fEcx The ECX bit to test, or 0 if EDX.
3265 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3266 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3267 */
3268static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3269{
3270 uint32_t uEax, uEbx, uEcx, uEdx;
3271 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3272 return (fEcx && (uEcx & fEcx))
3273 || (fEdx && (uEdx & fEdx));
3274}
3275
3276/** @} */
3277
3278
3279/** @name FPU access and helpers.
3280 *
3281 * @{
3282 */
3283
3284
3285/**
3286 * Hook for preparing to use the host FPU.
3287 *
3288 * This is necessary in ring-0 and raw-mode context.
3289 *
3290 * @param pIemCpu The IEM per CPU data.
3291 */
3292DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3293{
3294#ifdef IN_RING3
3295 NOREF(pIemCpu);
3296#else
3297# error "Implement me"
3298#endif
3299}
3300
3301
3302/**
3303 * Stores a QNaN value into a FPU register.
3304 *
3305 * @param pReg Pointer to the register.
3306 */
3307DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3308{
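    /* The QNaN "real indefinite": sign set, exponent all ones, and only the
       integer bit and the top fraction bit set in the mantissa. */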
3309 pReg->au32[0] = UINT32_C(0x00000000);
3310 pReg->au32[1] = UINT32_C(0xc0000000);
3311 pReg->au16[4] = UINT16_C(0xffff);
3312}
3313
3314
3315/**
3316 * Updates the FOP, FPU.CS and FPUIP registers.
3317 *
3318 * @param pIemCpu The IEM per CPU data.
3319 * @param pCtx The CPU context.
3320 */
3321DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3322{
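    /* FOP is the 11-bit FPU opcode: the full ModR/M byte in bits 0-7 and the
       low three bits of the preceding 0xd8-0xdf escape byte in bits 8-10. */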
3323 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3324 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3325    /** @todo FPU.CS and FPUIP need to be kept separately. */
3326 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3327 {
3328 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
3329 * happens in real mode here based on the fnsave and fnstenv images. */
3330 pCtx->fpu.CS = 0;
3331 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs << 4);
3332 }
3333 else
3334 {
3335 pCtx->fpu.CS = pCtx->cs;
3336 pCtx->fpu.FPUIP = pCtx->rip;
3337 }
3338}
3339
3340
3341/**
3342 * Updates the FPU.DS and FPUDP registers.
3343 *
3344 * @param pIemCpu The IEM per CPU data.
3345 * @param pCtx The CPU context.
3346 * @param iEffSeg The effective segment register.
3347 * @param GCPtrEff The effective address relative to @a iEffSeg.
3348 */
3349DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3350{
3351 RTSEL sel;
3352 switch (iEffSeg)
3353 {
3354 case X86_SREG_DS: sel = pCtx->ds; break;
3355 case X86_SREG_SS: sel = pCtx->ss; break;
3356 case X86_SREG_CS: sel = pCtx->cs; break;
3357 case X86_SREG_ES: sel = pCtx->es; break;
3358 case X86_SREG_FS: sel = pCtx->fs; break;
3359 case X86_SREG_GS: sel = pCtx->gs; break;
3360 default:
3361 AssertMsgFailed(("%d\n", iEffSeg));
3362 sel = pCtx->ds;
3363 }
3364    /** @todo FPU.DS and FPUDP need to be kept separately. */
3365 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3366 {
3367 pCtx->fpu.DS = 0;
3368 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3369 }
3370 else
3371 {
3372 pCtx->fpu.DS = sel;
3373 pCtx->fpu.FPUDP = GCPtrEff;
3374 }
3375}
3376
3377
3378/**
3379 * Rotates the stack registers in the push direction.
3380 *
3381 * @param pCtx The CPU context.
3382 * @remarks This is a complete waste of time, but fxsave stores the registers in
3383 * stack order.
3384 */
3385DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3386{
3387 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3388 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3389 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3390 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3391 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3392 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3393 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3394 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3395 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3396}
3397
3398
3399/**
3400 * Rotates the stack registers in the pop direction.
3401 *
3402 * @param pCtx The CPU context.
3403 * @remarks This is a complete waste of time, but fxsave stores the registers in
3404 * stack order.
3405 */
3406DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3407{
3408 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3409 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3410 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3411 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3412 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3413 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3414 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3415 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3416 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3417}
3418
3419
3420/**
3421 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3422 * exception prevents it.
3423 *
3424 * @param pIemCpu The IEM per CPU data.
3425 * @param pResult The FPU operation result to push.
3426 * @param pCtx The CPU context.
3427 */
3428static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3429{
3430 /* Update FSW and bail if there are pending exceptions afterwards. */
3431 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3432 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3433 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3434 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3435 {
3436 pCtx->fpu.FSW = fFsw;
3437 return;
3438 }
3439
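    /* A push decrements TOP; adding 7 and masking with the 3-bit TOP mask equals subtracting one modulo 8. */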
3440 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3441 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3442 {
3443 /* All is fine, push the actual value. */
3444 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3445 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3446 }
3447 else if (pCtx->fpu.FCW & X86_FCW_IM)
3448 {
3449 /* Masked stack overflow, push QNaN. */
3450 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3451 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3452 }
3453 else
3454 {
3455 /* Raise stack overflow, don't push anything. */
3456 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3457 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3458 return;
3459 }
3460
3461 fFsw &= ~X86_FSW_TOP_MASK;
3462 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3463 pCtx->fpu.FSW = fFsw;
3464
3465 iemFpuRotateStackPush(pCtx);
3466}
3467
3468
3469/**
3470 * Stores a result in a FPU register and updates the FSW and FTW.
3471 *
3472 * @param pIemCpu The IEM per CPU data.
3473 * @param pResult The result to store.
3474 * @param iStReg Which FPU register to store it in.
3475 * @param pCtx The CPU context.
3476 */
3477static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3478{
3479 Assert(iStReg < 8);
3480 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3481 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3482 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3483 pCtx->fpu.FTW |= RT_BIT(iReg);
3484 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3485}
3486
3487
3488/**
3489 * Only updates the FPU status word (FSW) with the result of the current
3490 * instruction.
3491 *
3492 * @param pCtx The CPU context.
3493 * @param u16FSW The FSW output of the current instruction.
3494 */
3495static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3496{
3497 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3498 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3499}
3500
3501
3502/**
3503 * Pops one item off the FPU stack if no pending exception prevents it.
3504 *
3505 * @param pCtx The CPU context.
3506 */
3507static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3508{
3509 /* Check pending exceptions. */
3510 uint16_t uFSW = pCtx->fpu.FSW;
3511 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3512 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3513 return;
3514
3515 /* Pop: TOP is incremented by one (adding 9 inside the 3-bit TOP field equals +1 modulo 8). */
3516 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3517 uFSW &= ~X86_FSW_TOP_MASK;
3518 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3519 pCtx->fpu.FSW = uFSW;
3520
3521 /* Mark the previous ST0 as empty. */
3522 iOldTop >>= X86_FSW_TOP_SHIFT;
3523 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3524
3525 /* Rotate the registers. */
3526 iemFpuRotateStackPop(pCtx);
3527}
3528
3529
3530/**
3531 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3532 *
3533 * @param pIemCpu The IEM per CPU data.
3534 * @param pResult The FPU operation result to push.
3535 */
3536static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3537{
3538 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3539 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3540 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3541}
3542
3543
3544/**
3545 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3546 * and sets FPUDP and FPUDS.
3547 *
3548 * @param pIemCpu The IEM per CPU data.
3549 * @param pResult The FPU operation result to push.
3550 * @param iEffSeg The effective segment register.
3551 * @param GCPtrEff The effective address relative to @a iEffSeg.
3552 */
3553static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3554{
3555 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3556 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3557 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3558 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3559}
3560
3561
3562/**
3563 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
3564 * unless a pending exception prevents it.
3565 *
3566 * @param pIemCpu The IEM per CPU data.
3567 * @param pResult The FPU operation result to store and push.
3568 */
3569static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3570{
3571 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3572 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3573
3574 /* Update FSW and bail if there are pending exceptions afterwards. */
3575 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3576 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3577 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3578 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3579 {
3580 pCtx->fpu.FSW = fFsw;
3581 return;
3582 }
3583
3584 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3585 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3586 {
3587 /* All is fine, push the actual value. */
3588 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3589 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3590 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3591 }
3592 else if (pCtx->fpu.FCW & X86_FCW_IM)
3593 {
3594 /* Masked stack overflow, push QNaN. */
3595 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3596 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3597 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3598 }
3599 else
3600 {
3601 /* Raise stack overflow, don't push anything. */
3602 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3603 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3604 return;
3605 }
3606
3607 fFsw &= ~X86_FSW_TOP_MASK;
3608 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3609 pCtx->fpu.FSW = fFsw;
3610
3611 iemFpuRotateStackPush(pCtx);
3612}
3613
3614
3615/**
3616 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3617 * FOP.
3618 *
3619 * @param pIemCpu The IEM per CPU data.
3620 * @param pResult The result to store.
3621 * @param iStReg Which FPU register to store it in.
3623 */
3624static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3625{
3626 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3627 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3628 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3629}
3630
3631
3632/**
3633 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3634 * FOP, and then pops the stack.
3635 *
3636 * @param pIemCpu The IEM per CPU data.
3637 * @param pResult The result to store.
3638 * @param iStReg Which FPU register to store it in.
3640 */
3641static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3642{
3643 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3644 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3645 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3646 iemFpuMaybePopOne(pCtx);
3647}
3648
3649
3650/**
3651 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3652 * FPUDP, and FPUDS.
3653 *
3654 * @param pIemCpu The IEM per CPU data.
3655 * @param pResult The result to store.
3656 * @param iStReg Which FPU register to store it in.
3658 * @param iEffSeg The effective memory operand selector register.
3659 * @param GCPtrEff The effective memory operand offset.
3660 */
3661static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3662{
3663 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3664 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3665 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3666 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3667}
3668
3669
3670/**
3671 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3672 * FPUDP, and FPUDS, and then pops the stack.
3673 *
3674 * @param pIemCpu The IEM per CPU data.
3675 * @param pResult The result to store.
3676 * @param iStReg Which FPU register to store it in.
3678 * @param iEffSeg The effective memory operand selector register.
3679 * @param GCPtrEff The effective memory operand offset.
3680 */
3681static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
3682 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3683{
3684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3685 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3686 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3687 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3688 iemFpuMaybePopOne(pCtx);
3689}
3690
3691
3692/**
3693 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
3694 *
3695 * @param pIemCpu The IEM per CPU data.
3696 */
3697static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
3698{
3699 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
3700}
3701
3702
3703/**
3704 * Marks the specified stack register as free (for FFREE).
3705 *
3706 * @param pIemCpu The IEM per CPU data.
3707 * @param iStReg The register to free.
3708 */
3709static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
3710{
3711 Assert(iStReg < 8);
3712 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3713 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3714 pCtx->fpu.FTW &= ~RT_BIT(iReg);
3715}
3716
3717
3718/**
3719 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
3720 *
3721 * @param pIemCpu The IEM per CPU data.
3722 */
3723static void iemFpuStackIncTop(PIEMCPU pIemCpu)
3724{
3725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3726 uint16_t uFsw = pCtx->fpu.FSW;
3727 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
3728 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3729 uFsw &= ~X86_FSW_TOP_MASK;
3730 uFsw |= uTop;
3731 pCtx->fpu.FSW = uFsw;
3732}
3733
3734
3735/**
3736 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
3737 *
3738 * @param pIemCpu The IEM per CPU data.
3739 */
3740static void iemFpuStackDecTop(PIEMCPU pIemCpu)
3741{
3742 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3743 uint16_t uFsw = pCtx->fpu.FSW;
3744 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
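    /* Adding 7 within the 3-bit TOP field is the same as subtracting one modulo 8. */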
3745 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3746 uFsw &= ~X86_FSW_TOP_MASK;
3747 uFsw |= uTop;
3748 pCtx->fpu.FSW = uFsw;
3749}
3750
3751
3752/**
3753 * Updates the FSW, FOP, FPUIP, and FPUCS.
3754 *
3755 * @param pIemCpu The IEM per CPU data.
3756 * @param u16FSW The FSW from the current instruction.
3757 */
3758static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
3759{
3760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3761 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3762 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3763}
3764
3765
3766/**
3767 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
3768 *
3769 * @param pIemCpu The IEM per CPU data.
3770 * @param u16FSW The FSW from the current instruction.
3771 */
3772static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
3773{
3774 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3775 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3776 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3777 iemFpuMaybePopOne(pCtx);
3778}
3779
3780
3781/**
3782 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
3783 *
3784 * @param pIemCpu The IEM per CPU data.
3785 * @param u16FSW The FSW from the current instruction.
3786 * @param iEffSeg The effective memory operand selector register.
3787 * @param GCPtrEff The effective memory operand offset.
3788 */
3789static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3790{
3791 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3792 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3793 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3794 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3795}
3796
3797
3798/**
3799 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
3800 *
3801 * @param pIemCpu The IEM per CPU data.
3802 * @param u16FSW The FSW from the current instruction.
3803 */
3804static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
3805{
3806 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3807 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3808 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3809 iemFpuMaybePopOne(pCtx);
3810 iemFpuMaybePopOne(pCtx);
3811}
3812
3813
3814/**
3815 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
3816 *
3817 * @param pIemCpu The IEM per CPU data.
3818 * @param u16FSW The FSW from the current instruction.
3819 * @param iEffSeg The effective memory operand selector register.
3820 * @param GCPtrEff The effective memory operand offset.
3821 */
3822static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3823{
3824 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3825 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3826 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3827 iemFpuUpdateFSWOnly(pCtx, u16FSW);
3828 iemFpuMaybePopOne(pCtx);
3829}
3830
3831
3832/**
3833 * Worker routine for raising an FPU stack underflow exception.
3834 *
3835 * @param pIemCpu The IEM per CPU data.
3836 * @param iStReg The stack register being accessed.
3837 * @param pCtx The CPU context.
3838 */
3839static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
3840{
3841 Assert(iStReg < 8 || iStReg == UINT8_MAX);
3842 if (pCtx->fpu.FCW & X86_FCW_IM)
3843 {
3844 /* Masked underflow. */
3845 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3846 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
3847 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3848 if (iStReg != UINT8_MAX)
3849 {
3850 pCtx->fpu.FTW |= RT_BIT(iReg);
3851 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
3852 }
3853 }
3854 else
3855 {
3856 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3857 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3858 }
3859}
3860
3861
3862/**
3863 * Raises a FPU stack underflow exception.
3864 *
3865 * @param pIemCpu The IEM per CPU data.
3866 * @param iStReg The destination register that should be loaded
3867 * with QNaN if \#IS is not masked. Specify
3868 * UINT8_MAX if none (like for fcom).
3869 */
3870DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
3871{
3872 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3873 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3874 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3875}
3876
3877
3878DECL_NO_INLINE(static, void)
3879iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3880{
3881 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3882 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3883 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3884 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3885}
3886
3887
3888DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
3889{
3890 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3891 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3892 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3893 iemFpuMaybePopOne(pCtx);
3894}
3895
3896
3897DECL_NO_INLINE(static, void)
3898iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3899{
3900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3901 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3902 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3903 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
3904 iemFpuMaybePopOne(pCtx);
3905}
3906
3907
3908DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
3909{
3910 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3911 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3912 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
3913 iemFpuMaybePopOne(pCtx);
3914 iemFpuMaybePopOne(pCtx);
3915}
3916
3917
3918DECL_NO_INLINE(static, void)
3919iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
3920{
3921 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3922 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3923
3924 if (pCtx->fpu.FCW & X86_FCW_IM)
3925 {
3926 /* Masked underflow - Push QNaN. */
3927 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3928 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
3929 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
3930 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
3931 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3932 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3933 iemFpuRotateStackPush(pCtx);
3934 }
3935 else
3936 {
3937 /* Exception pending - don't change TOP or the register stack. */
3938 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3939 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3940 }
3941}
3942
3943
3944DECL_NO_INLINE(static, void)
3945iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
3946{
3947 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3948 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3949
3950 if (pCtx->fpu.FCW & X86_FCW_IM)
3951 {
3952 /* Masked underflow - Push QNaN. */
3953 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3954 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
3955 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
3956 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
3957 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3958 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3959 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3960 iemFpuRotateStackPush(pCtx);
3961 }
3962 else
3963 {
3964 /* Exception pending - don't change TOP or the register stack. */
3965 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3966 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3967 }
3968}
3969
3970
3971/**
3972 * Worker routine for raising an FPU stack overflow exception on a push.
3973 *
3974 * @param pIemCpu The IEM per CPU data.
3975 * @param pCtx The CPU context.
3976 */
3977static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3978{
3979 if (pCtx->fpu.FCW & X86_FCW_IM)
3980 {
3981 /* Masked overflow. */
3982 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3983 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
3984 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3985 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
3986 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3987 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3988 iemFpuRotateStackPush(pCtx);
3989 }
3990 else
3991 {
3992 /* Exception pending - don't change TOP or the register stack. */
3993 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3994 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3995 }
3996}
3997
3998
3999/**
4000 * Raises a FPU stack overflow exception on a push.
4001 *
4002 * @param pIemCpu The IEM per CPU data.
4003 */
4004DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4005{
4006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4007 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4008 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4009}
4010
4011
4012/**
4013 * Raises a FPU stack overflow exception on a push with a memory operand.
4014 *
4015 * @param pIemCpu The IEM per CPU data.
4016 * @param iEffSeg The effective memory operand selector register.
4017 * @param GCPtrEff The effective memory operand offset.
4018 */
4019DECL_NO_INLINE(static, void)
4020iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4021{
4022 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4023 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4024 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4025 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4026}
4027
4028
4029static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4030{
4031 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4032 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4033 if (pCtx->fpu.FTW & RT_BIT(iReg))
4034 return VINF_SUCCESS;
4035 return VERR_NOT_FOUND;
4036}
4037
4038
4039static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4040{
4041 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4042 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4043 if (pCtx->fpu.FTW & RT_BIT(iReg))
4044 {
4045 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4046 return VINF_SUCCESS;
4047 }
4048 return VERR_NOT_FOUND;
4049}
4050
4051
4052static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4053 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4054{
4055 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4056 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4057 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4058 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4059 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4060 {
4061 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4062 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4063 return VINF_SUCCESS;
4064 }
4065 return VERR_NOT_FOUND;
4066}
4067
4068
4069static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4070{
4071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4072 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4073 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4074 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4075 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4076 {
4077 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4078 return VINF_SUCCESS;
4079 }
4080 return VERR_NOT_FOUND;
4081}
4082
4083
4084/**
4085 * Updates the FPU exception status after FCW is changed.
4086 *
4087 * @param pCtx The CPU context.
4088 */
4089static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4090{
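    /* ES and B are summary bits: they are set when any exception flag in FSW is not masked by the corresponding FCW mask bit. */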
4091 uint16_t u16Fsw = pCtx->fpu.FSW;
4092 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4093 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4094 else
4095 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4096 pCtx->fpu.FSW = u16Fsw;
4097}
4098
4099
4100/**
4101 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4102 *
4103 * @returns The full FTW.
4104 * @param pCtx The CPU state.
4105 */
4106static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4107{
4108 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4109 uint16_t u16Ftw = 0;
4110 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
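    /* Full FTW uses two bits per register: 0=valid, 1=zero, 2=special (NaN, infinity, denormal, unnormal), 3=empty. */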
4111 for (unsigned iSt = 0; iSt < 8; iSt++)
4112 {
4113 unsigned const iReg = (iSt + iTop) & 7;
4114 if (!(u8Ftw & RT_BIT(iReg)))
4115 u16Ftw |= 3 << (iReg * 2); /* empty */
4116 else
4117 {
4118 uint16_t uTag;
4119 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4120 if (pr80Reg->s.uExponent == 0x7fff)
4121 uTag = 2; /* Exponent is all 1's => Special. */
4122 else if (pr80Reg->s.uExponent == 0x0000)
4123 {
4124 if (pr80Reg->s.u64Mantissa == 0x0000)
4125 uTag = 1; /* All bits are zero => Zero. */
4126 else
4127 uTag = 2; /* Must be special. */
4128 }
4129 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4130 uTag = 0; /* Valid. */
4131 else
4132 uTag = 2; /* Must be special. */
4133
4134            u16Ftw |= uTag << (iReg * 2);
4135 }
4136 }
4137
4138 return u16Ftw;
4139}
4140
4141
4142/**
4143 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4144 *
4145 * @returns The compressed FTW.
4146 * @param u16FullFtw The full FTW to convert.
4147 */
4148static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4149{
4150 uint8_t u8Ftw = 0;
4151 for (unsigned i = 0; i < 8; i++)
4152 {
4153 if ((u16FullFtw & 3) != 3 /*empty*/)
4154 u8Ftw |= RT_BIT(i);
4155 u16FullFtw >>= 2;
4156 }
4157
4158 return u8Ftw;
4159}
4160
4161/** @} */
4162
4163
4164/** @name Memory access.
4165 *
4166 * @{
4167 */
4168
4169
4170/**
4171 * Checks if the given segment can be written to, raising the appropriate
4172 * exception if not.
4173 *
4174 * @returns VBox strict status code.
4175 *
4176 * @param pIemCpu The IEM per CPU data.
4177 * @param pHid Pointer to the hidden register.
4178 * @param iSegReg The register number.
4179 */
4180static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4181{
4182 if (!pHid->Attr.n.u1Present)
4183 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4184
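    /* Note: the type/writable checks below are skipped in 64-bit mode, where data segmentation is essentially disabled. */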
4185 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4186 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4187 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4188 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4189
4190 /** @todo DPL/RPL/CPL? */
4191
4192 return VINF_SUCCESS;
4193}
4194
4195
4196/**
4197 * Checks if the given segment can be read from, raising the appropriate
4198 * exception if not.
4199 *
4200 * @returns VBox strict status code.
4201 *
4202 * @param pIemCpu The IEM per CPU data.
4203 * @param pHid Pointer to the hidden register.
4204 * @param iSegReg The register number.
4205 */
4206static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4207{
4208 if (!pHid->Attr.n.u1Present)
4209 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4210
4211 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4212 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4213 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4214
4215 /** @todo DPL/RPL/CPL? */
4216
4217 return VINF_SUCCESS;
4218}
4219
4220
4221/**
4222 * Applies the segment limit, base and attributes.
4223 *
4224 * This may raise a \#GP or \#SS.
4225 *
4226 * @returns VBox strict status code.
4227 *
4228 * @param pIemCpu The IEM per CPU data.
4229 * @param fAccess The kind of access which is being performed.
4230 * @param iSegReg The index of the segment register to apply.
4231 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4232 * TSS, ++).
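 * @param   cbMem               The number of bytes to access.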
4233 * @param pGCPtrMem Pointer to the guest memory address to apply
4234 * segmentation to. Input and output parameter.
4235 */
4236static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4237 size_t cbMem, PRTGCPTR pGCPtrMem)
4238{
4239 if (iSegReg == UINT8_MAX)
4240 return VINF_SUCCESS;
4241
4242 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4243 switch (pIemCpu->enmCpuMode)
4244 {
4245 case IEMMODE_16BIT:
4246 case IEMMODE_32BIT:
4247 {
4248 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4249 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4250
4251 Assert(pSel->Attr.n.u1Present);
4252 Assert(pSel->Attr.n.u1DescType);
4253 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4254 {
4255 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4256 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4257 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4258
4259 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4260 {
4261 /** @todo CPL check. */
4262 }
4263
4264 /*
4265 * There are two kinds of data selectors, normal and expand down.
4266 */
4267 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4268 {
4269 if ( GCPtrFirst32 > pSel->u32Limit
4270 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4271 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4272
4273 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4274 }
4275 else
4276 {
4277 /** @todo implement expand down segments. */
4278 AssertFailed(/** @todo implement this */);
4279 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
4280 }
4281 }
4282 else
4283 {
4284
4285 /*
4286             * A code selector can usually be used to read through it; writing
4287             * is only permitted in real and V8086 mode.
4288 */
4289 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4290 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4291 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4292 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4293 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4294
4295 if ( GCPtrFirst32 > pSel->u32Limit
4296 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4297 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4298
4299 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4300 {
4301 /** @todo CPL check. */
4302 }
4303
4304 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4305 }
4306 return VINF_SUCCESS;
4307 }
4308
4309 case IEMMODE_64BIT:
4310 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4311 *pGCPtrMem += pSel->u64Base;
4312 return VINF_SUCCESS;
4313
4314 default:
4315 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4316 }
4317}
4318
4319
4320/**
4321 * Translates a virtual address to a physical address and checks if we
4322 * can access the page as specified.
4323 *
4324 * @param pIemCpu The IEM per CPU data.
4325 * @param GCPtrMem The virtual address.
4326 * @param fAccess The intended access.
4327 * @param pGCPhysMem Where to return the physical address.
4328 */
4329static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4330 PRTGCPHYS pGCPhysMem)
4331{
4332 /** @todo Need a different PGM interface here. We're currently using
4333 * generic / REM interfaces. this won't cut it for R0 & RC. */
4334 RTGCPHYS GCPhys;
4335 uint64_t fFlags;
4336 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4337 if (RT_FAILURE(rc))
4338 {
4339 /** @todo Check unassigned memory in unpaged mode. */
4340 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4341 *pGCPhysMem = NIL_RTGCPHYS;
4342 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4343 }
4344
4345 /* If the page is writable and does not have the no-exec bit set, all
4346 access is allowed. Otherwise we'll have to check more carefully... */
4347 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4348 {
4349 /* Write to read only memory? */
4350 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4351 && !(fFlags & X86_PTE_RW)
4352 && ( pIemCpu->uCpl != 0
4353 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4354 {
4355 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4356 *pGCPhysMem = NIL_RTGCPHYS;
4357 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4358 }
4359
4360 /* Kernel memory accessed by userland? */
4361 if ( !(fFlags & X86_PTE_US)
4362 && pIemCpu->uCpl == 3
4363 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4364 {
4365 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4366 *pGCPhysMem = NIL_RTGCPHYS;
4367 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4368 }
4369
4370 /* Executing non-executable memory? */
4371 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4372 && (fFlags & X86_PTE_PAE_NX)
4373 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4374 {
4375 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4376 *pGCPhysMem = NIL_RTGCPHYS;
4377 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4378 VERR_ACCESS_DENIED);
4379 }
4380 }
4381
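    /* The translation above yields the page frame address; merge the offset within the page back in. */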
4382 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4383 *pGCPhysMem = GCPhys;
4384 return VINF_SUCCESS;
4385}
4386
4387
4388
4389/**
4390 * Maps a physical page.
4391 *
4392 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4393 * @param pIemCpu The IEM per CPU data.
4394 * @param GCPhysMem The physical address.
4395 * @param fAccess The intended access.
4396 * @param ppvMem Where to return the mapping address.
4397 */
4398static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
4399{
4400#ifdef IEM_VERIFICATION_MODE
4401 /* Force the alternative path so we can ignore writes. */
4402 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4403 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4404#endif
4405
4406 /*
4407     * If we can map the page without trouble, we can do block processing
4408     * until the end of the current page.
4409 */
4410 /** @todo need some better API. */
4411 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4412 GCPhysMem,
4413 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4414 ppvMem);
4415}
4416
4417
4418/**
4419 * Unmap a page previously mapped by iemMemPageMap.
4420 *
4421 * This is currently a dummy function.
4422 *
4423 * @param pIemCpu The IEM per CPU data.
4424 * @param GCPhysMem The physical address.
4425 * @param fAccess The intended access.
4426 * @param pvMem What iemMemPageMap returned.
4427 */
4428DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
4429{
4430 NOREF(pIemCpu);
4431 NOREF(GCPhysMem);
4432 NOREF(fAccess);
4433 NOREF(pvMem);
4434}
4435
4436
4437/**
4438 * Looks up a memory mapping entry.
4439 *
4440 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
4441 * @param pIemCpu The IEM per CPU data.
4442 * @param pvMem The memory address.
4443 * @param fAccess The access type and purpose to match.
4444 */
4445DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4446{
4447 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4448 if ( pIemCpu->aMemMappings[0].pv == pvMem
4449 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4450 return 0;
4451 if ( pIemCpu->aMemMappings[1].pv == pvMem
4452 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4453 return 1;
4454 if ( pIemCpu->aMemMappings[2].pv == pvMem
4455 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4456 return 2;
4457 return VERR_NOT_FOUND;
4458}
4459
4460
4461/**
4462 * Finds a free memmap entry when using iNextMapping doesn't work.
4463 *
4464 * @returns Memory mapping index, 1024 on failure.
4465 * @param pIemCpu The IEM per CPU data.
4466 */
4467static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4468{
4469 /*
4470 * The easy case.
4471 */
4472 if (pIemCpu->cActiveMappings == 0)
4473 {
4474 pIemCpu->iNextMapping = 1;
4475 return 0;
4476 }
4477
4478 /* There should be enough mappings for all instructions. */
4479 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4480
4481 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4482 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4483 return i;
4484
4485 AssertFailedReturn(1024);
4486}
4487
4488
4489/**
4490 * Commits a bounce buffer that needs writing back and unmaps it.
4491 *
4492 * @returns Strict VBox status code.
4493 * @param pIemCpu The IEM per CPU data.
4494 * @param iMemMap The index of the buffer to commit.
4495 */
4496static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4497{
4498 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4499 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4500
4501 /*
4502 * Do the writing.
4503 */
4504 int rc;
4505 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4506 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4507 {
4508 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4509 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4510 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4511 if (!pIemCpu->fByPassHandlers)
4512 {
4513 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4514 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4515 pbBuf,
4516 cbFirst);
4517 if (cbSecond && rc == VINF_SUCCESS)
4518 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4519 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4520 pbBuf + cbFirst,
4521 cbSecond);
4522 }
4523 else
4524 {
4525 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4526 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4527 pbBuf,
4528 cbFirst);
4529 if (cbSecond && rc == VINF_SUCCESS)
4530 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4531 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4532 pbBuf + cbFirst,
4533 cbSecond);
4534 }
4535 }
4536 else
4537 rc = VINF_SUCCESS;
4538
4539#ifdef IEM_VERIFICATION_MODE
4540 /*
4541 * Record the write(s).
4542 */
4543 if (!pIemCpu->fNoRem)
4544 {
4545 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4546 if (pEvtRec)
4547 {
4548 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4549 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4550 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4551 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4552 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4553 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4554 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4555 }
4556 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4557 {
4558 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4559 if (pEvtRec)
4560 {
4561 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4562 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4563 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4564 memcpy(pEvtRec->u.RamWrite.ab,
4565 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4566 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4567 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4568 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4569 }
4570 }
4571 }
4572#endif
4573
4574 /*
4575 * Free the mapping entry.
4576 */
4577 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4578 Assert(pIemCpu->cActiveMappings != 0);
4579 pIemCpu->cActiveMappings--;
4580 return rc;
4581}
4582
4583
4584/**
4585 * iemMemMap worker that deals with a request crossing pages.
4586 */
4587static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4588 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4589{
4590 /*
4591 * Do the address translations.
4592 */
4593 RTGCPHYS GCPhysFirst;
4594 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4595 if (rcStrict != VINF_SUCCESS)
4596 return rcStrict;
4597
4598 RTGCPHYS GCPhysSecond;
4599 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4600 if (rcStrict != VINF_SUCCESS)
4601 return rcStrict;
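    /* The second translation was done on the last byte of the access, so mask down to the start of the second page. */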
4602 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4603
4604 /*
4605 * Read in the current memory content if it's a read, execute or partial
4606 * write access.
4607 */
4608 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4609 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4610 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4611
4612 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4613 {
4614 int rc;
4615 if (!pIemCpu->fByPassHandlers)
4616 {
4617 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4618 if (rc != VINF_SUCCESS)
4619 return rc;
4620 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
4621 if (rc != VINF_SUCCESS)
4622 return rc;
4623 }
4624 else
4625 {
4626 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
4627 if (rc != VINF_SUCCESS)
4628 return rc;
4629 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
4630 if (rc != VINF_SUCCESS)
4631 return rc;
4632 }
4633
4634#ifdef IEM_VERIFICATION_MODE
4635 if ( !pIemCpu->fNoRem
4636 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
4637 {
4638 /*
4639 * Record the reads.
4640 */
4641 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4642 if (pEvtRec)
4643 {
4644 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4645 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4646 pEvtRec->u.RamRead.cb = cbFirstPage;
4647 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4648 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4649 }
4650 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4651 if (pEvtRec)
4652 {
4653 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4654 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
4655 pEvtRec->u.RamRead.cb = cbSecondPage;
4656 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4657 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4658 }
4659 }
4660#endif
4661 }
4662#ifdef VBOX_STRICT
4663 else
4664 memset(pbBuf, 0xcc, cbMem);
4665#endif
4666#ifdef VBOX_STRICT
4667 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4668 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4669#endif
4670
4671 /*
4672 * Commit the bounce buffer entry.
4673 */
4674 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4675 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
4676 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
4677 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
4678 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
4679 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4680 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4681 pIemCpu->cActiveMappings++;
4682
4683 *ppvMem = pbBuf;
4684 return VINF_SUCCESS;
4685}
4686
4687
4688/**
4689 * iemMemMap worker that deals with iemMemPageMap failures.
4690 */
4691static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
4692 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
4693{
4694 /*
4695 * Filter out conditions we can handle and the ones which shouldn't happen.
4696 */
4697 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
4698 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
4699 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
4700 {
4701 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
4702 return rcMap;
4703 }
4704 pIemCpu->cPotentialExits++;
4705
4706 /*
4707 * Read in the current memory content if it's a read, execute or partial
4708 * write access.
4709 */
4710 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4711 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4712 {
4713 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
4714 memset(pbBuf, 0xff, cbMem);
4715 else
4716 {
4717 int rc;
4718 if (!pIemCpu->fByPassHandlers)
4719 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
4720 else
4721 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
4722 if (rc != VINF_SUCCESS)
4723 return rc;
4724 }
4725
4726#ifdef IEM_VERIFICATION_MODE
4727 if ( !pIemCpu->fNoRem
4728 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
4729 {
4730 /*
4731 * Record the read.
4732 */
4733 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4734 if (pEvtRec)
4735 {
4736 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4737 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4738 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
4739 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4740 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4741 }
4742 }
4743#endif
4744 }
4745#ifdef VBOX_STRICT
4746 else
4747 memset(pbBuf, 0xcc, cbMem);
4748#endif
4749#ifdef VBOX_STRICT
4750 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4751 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4752#endif
4753
4754 /*
4755 * Commit the bounce buffer entry.
4756 */
4757 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4758 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
4759 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
4760 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
4761 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
4762 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4763 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4764 pIemCpu->cActiveMappings++;
4765
4766 *ppvMem = pbBuf;
4767 return VINF_SUCCESS;
4768}
4769
4770
4771
4772/**
4773 * Maps the specified guest memory for the given kind of access.
4774 *
4775 * This may be using bounce buffering of the memory if it's crossing a page
4776 * boundary or if there is an access handler installed for any of it. Because
4777 * of lock prefix guarantees, we're in for some extra clutter when this
4778 * happens.
4779 *
4780 * This may raise a \#GP, \#SS, \#PF or \#AC.
4781 *
4782 * @returns VBox strict status code.
4783 *
4784 * @param pIemCpu The IEM per CPU data.
4785 * @param ppvMem Where to return the pointer to the mapped
4786 * memory.
4787 * @param cbMem The number of bytes to map. This is usually 1,
4788 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
4789 * string operations it can be up to a page.
4790 * @param iSegReg The index of the segment register to use for
4791 * this access. The base and limits are checked.
4792 * Use UINT8_MAX to indicate that no segmentation
4793 * is required (for IDT, GDT and LDT accesses).
4794 * @param GCPtrMem The address of the guest memory.
4795 * @param fAccess How the memory is being accessed. The
4796 * IEM_ACCESS_TYPE_XXX bit is used to figure out
4797 * how to map the memory, while the
4798 * IEM_ACCESS_WHAT_XXX bit is used when raising
4799 * exceptions.
4800 */
4801static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
4802{
4803 /*
4804 * Check the input and figure out which mapping entry to use.
4805 */
4806 Assert(cbMem <= 32 || cbMem == 512);
4807 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
4808
4809 unsigned iMemMap = pIemCpu->iNextMapping;
4810 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
4811 {
4812 iMemMap = iemMemMapFindFree(pIemCpu);
4813 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
4814 }
4815
4816 /*
4817 * Map the memory, checking that we can actually access it. If something
4818 * slightly complicated happens, fall back on bounce buffering.
4819 */
4820 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
4821 if (rcStrict != VINF_SUCCESS)
4822 return rcStrict;
4823
4824 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
4825 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
4826
4827 RTGCPHYS GCPhysFirst;
4828 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
4829 if (rcStrict != VINF_SUCCESS)
4830 return rcStrict;
4831
4832 void *pvMem;
4833 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
4834 if (rcStrict != VINF_SUCCESS)
4835 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
4836
4837 /*
4838 * Fill in the mapping table entry.
4839 */
4840 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
4841 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
4842 pIemCpu->iNextMapping = iMemMap + 1;
4843 pIemCpu->cActiveMappings++;
4844
4845 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4846 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4847 pIemCpu->cbWritten += cbMem;
4848 *ppvMem = pvMem;
4849 return VINF_SUCCESS;
4850}
4851
4852
4853/**
4854 * Commits the guest memory if bounce buffered and unmaps it.
4855 *
4856 * @returns Strict VBox status code.
4857 * @param pIemCpu The IEM per CPU data.
4858 * @param pvMem The mapping.
4859 * @param fAccess The kind of access.
4860 */
4861static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4862{
4863 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
4864 AssertReturn(iMemMap >= 0, iMemMap);
4865
4866 /*
4867 * If it's bounce buffered, we need to write back the buffer.
4868 */
4869 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4870 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4871 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
4872
4873 /* Free the entry. */
4874 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4875 Assert(pIemCpu->cActiveMappings != 0);
4876 pIemCpu->cActiveMappings--;
4877 return VINF_SUCCESS;
4878}
4879
4880
4881/**
4882 * Fetches a data byte.
4883 *
4884 * @returns Strict VBox status code.
4885 * @param pIemCpu The IEM per CPU data.
4886 * @param pu8Dst Where to return the byte.
4887 * @param iSegReg The index of the segment register to use for
4888 * this access. The base and limits are checked.
4889 * @param GCPtrMem The address of the guest memory.
4890 */
4891static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4892{
4893 /* The lazy approach for now... */
4894 uint8_t const *pu8Src;
4895 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4896 if (rc == VINF_SUCCESS)
4897 {
4898 *pu8Dst = *pu8Src;
4899 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4900 }
4901 return rc;
4902}
4903
4904
4905/**
4906 * Fetches a data word.
4907 *
4908 * @returns Strict VBox status code.
4909 * @param pIemCpu The IEM per CPU data.
4910 * @param pu16Dst Where to return the word.
4911 * @param iSegReg The index of the segment register to use for
4912 * this access. The base and limits are checked.
4913 * @param GCPtrMem The address of the guest memory.
4914 */
4915static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4916{
4917 /* The lazy approach for now... */
4918 uint16_t const *pu16Src;
4919 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4920 if (rc == VINF_SUCCESS)
4921 {
4922 *pu16Dst = *pu16Src;
4923 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
4924 }
4925 return rc;
4926}
4927
4928
4929/**
4930 * Fetches a data dword.
4931 *
4932 * @returns Strict VBox status code.
4933 * @param pIemCpu The IEM per CPU data.
4934 * @param pu32Dst Where to return the dword.
4935 * @param iSegReg The index of the segment register to use for
4936 * this access. The base and limits are checked.
4937 * @param GCPtrMem The address of the guest memory.
4938 */
4939static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4940{
4941 /* The lazy approach for now... */
4942 uint32_t const *pu32Src;
4943 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4944 if (rc == VINF_SUCCESS)
4945 {
4946 *pu32Dst = *pu32Src;
4947 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
4948 }
4949 return rc;
4950}
4951
4952
4953#ifdef SOME_UNUSED_FUNCTION
4954/**
4955 * Fetches a data dword and sign extends it to a qword.
4956 *
4957 * @returns Strict VBox status code.
4958 * @param pIemCpu The IEM per CPU data.
4959 * @param pu64Dst Where to return the sign extended value.
4960 * @param iSegReg The index of the segment register to use for
4961 * this access. The base and limits are checked.
4962 * @param GCPtrMem The address of the guest memory.
4963 */
4964static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4965{
4966 /* The lazy approach for now... */
4967 int32_t const *pi32Src;
4968 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4969 if (rc == VINF_SUCCESS)
4970 {
4971 *pu64Dst = *pi32Src;
4972 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
4973 }
4974#ifdef __GNUC__ /* warning: GCC may be a royal pain */
4975 else
4976 *pu64Dst = 0;
4977#endif
4978 return rc;
4979}
4980#endif
4981
4982
4983/**
4984 * Fetches a data qword.
4985 *
4986 * @returns Strict VBox status code.
4987 * @param pIemCpu The IEM per CPU data.
4988 * @param pu64Dst Where to return the qword.
4989 * @param iSegReg The index of the segment register to use for
4990 * this access. The base and limits are checked.
4991 * @param GCPtrMem The address of the guest memory.
4992 */
4993static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4994{
4995 /* The lazy approach for now... */
4996 uint64_t const *pu64Src;
4997 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4998 if (rc == VINF_SUCCESS)
4999 {
5000 *pu64Dst = *pu64Src;
5001 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5002 }
5003 return rc;
5004}
5005
5006
5007/**
5008 * Fetches a data tword.
5009 *
5010 * @returns Strict VBox status code.
5011 * @param pIemCpu The IEM per CPU data.
5012 * @param pr80Dst Where to return the tword.
5013 * @param iSegReg The index of the segment register to use for
5014 * this access. The base and limits are checked.
5015 * @param GCPtrMem The address of the guest memory.
5016 */
5017static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5018{
5019 /* The lazy approach for now... */
5020 PCRTFLOAT80U pr80Src;
5021 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5022 if (rc == VINF_SUCCESS)
5023 {
5024 *pr80Dst = *pr80Src;
5025 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5026 }
5027 return rc;
5028}
5029
5030
5031/**
5032 * Fetches a descriptor register (lgdt, lidt).
5033 *
5034 * @returns Strict VBox status code.
5035 * @param pIemCpu The IEM per CPU data.
5036 * @param pcbLimit Where to return the limit.
5037 * @param pGCPtrBase Where to return the base.
5038 * @param iSegReg The index of the segment register to use for
5039 * this access. The base and limits are checked.
5040 * @param GCPtrMem The address of the guest memory.
5041 * @param enmOpSize The effective operand size.
5042 */
5043static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5044 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5045{
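    /* Map the 2-byte limit followed by the base: 3 base bytes for 16-bit, 4 for 32-bit and 8 for 64-bit operand size. */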
5046 uint8_t const *pu8Src;
5047 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5048 (void **)&pu8Src,
5049 enmOpSize == IEMMODE_64BIT
5050 ? 2 + 8
5051 : enmOpSize == IEMMODE_32BIT
5052 ? 2 + 4
5053 : 2 + 3,
5054 iSegReg,
5055 GCPtrMem,
5056 IEM_ACCESS_DATA_R);
5057 if (rcStrict == VINF_SUCCESS)
5058 {
5059 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5060 switch (enmOpSize)
5061 {
5062 case IEMMODE_16BIT:
5063 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5064 break;
5065 case IEMMODE_32BIT:
5066 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5067 break;
5068 case IEMMODE_64BIT:
5069 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5070 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5071 break;
5072
5073 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5074 }
5075 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5076 }
5077 return rcStrict;
5078}
5079
5080
5081
5082/**
5083 * Stores a data byte.
5084 *
5085 * @returns Strict VBox status code.
5086 * @param pIemCpu The IEM per CPU data.
5087 * @param iSegReg The index of the segment register to use for
5088 * this access. The base and limits are checked.
5089 * @param GCPtrMem The address of the guest memory.
5090 * @param u8Value The value to store.
5091 */
5092static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5093{
5094 /* The lazy approach for now... */
5095 uint8_t *pu8Dst;
5096 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5097 if (rc == VINF_SUCCESS)
5098 {
5099 *pu8Dst = u8Value;
5100 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5101 }
5102 return rc;
5103}
5104
5105
5106/**
5107 * Stores a data word.
5108 *
5109 * @returns Strict VBox status code.
5110 * @param pIemCpu The IEM per CPU data.
5111 * @param iSegReg The index of the segment register to use for
5112 * this access. The base and limits are checked.
5113 * @param GCPtrMem The address of the guest memory.
5114 * @param u16Value The value to store.
5115 */
5116static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5117{
5118 /* The lazy approach for now... */
5119 uint16_t *pu16Dst;
5120 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5121 if (rc == VINF_SUCCESS)
5122 {
5123 *pu16Dst = u16Value;
5124 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5125 }
5126 return rc;
5127}
5128
5129
5130/**
5131 * Stores a data dword.
5132 *
5133 * @returns Strict VBox status code.
5134 * @param pIemCpu The IEM per CPU data.
5135 * @param iSegReg The index of the segment register to use for
5136 * this access. The base and limits are checked.
5137 * @param GCPtrMem The address of the guest memory.
5138 * @param u32Value The value to store.
5139 */
5140static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5141{
5142 /* The lazy approach for now... */
5143 uint32_t *pu32Dst;
5144 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5145 if (rc == VINF_SUCCESS)
5146 {
5147 *pu32Dst = u32Value;
5148 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5149 }
5150 return rc;
5151}
5152
5153
5154/**
5155 * Stores a data qword.
5156 *
5157 * @returns Strict VBox status code.
5158 * @param pIemCpu The IEM per CPU data.
5159 * @param iSegReg The index of the segment register to use for
5160 * this access. The base and limits are checked.
5161 * @param GCPtrMem The address of the guest memory.
5162 * @param u64Value The value to store.
5163 */
5164static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5165{
5166 /* The lazy approach for now... */
5167 uint64_t *pu64Dst;
5168 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5169 if (rc == VINF_SUCCESS)
5170 {
5171 *pu64Dst = u64Value;
5172 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5173 }
5174 return rc;
5175}
5176
5177
5178/**
5179 * Pushes a word onto the stack.
5180 *
5181 * @returns Strict VBox status code.
5182 * @param pIemCpu The IEM per CPU data.
5183 * @param u16Value The value to push.
5184 */
5185static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5186{
5187    /* Decrement the stack pointer. */
5188 uint64_t uNewRsp;
5189 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5190 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5191
5192 /* Write the word the lazy way. */
5193 uint16_t *pu16Dst;
5194 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5195 if (rc == VINF_SUCCESS)
5196 {
5197 *pu16Dst = u16Value;
5198 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5199 }
5200
5201    /* Commit the new RSP value unless an access handler made trouble. */
5202 if (rc == VINF_SUCCESS)
5203 pCtx->rsp = uNewRsp;
5204
5205 return rc;
5206}
5207
5208
5209/**
5210 * Pushes a dword onto the stack.
5211 *
5212 * @returns Strict VBox status code.
5213 * @param pIemCpu The IEM per CPU data.
5214 * @param u32Value The value to push.
5215 */
5216static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5217{
5218    /* Decrement the stack pointer. */
5219 uint64_t uNewRsp;
5220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5221 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5222
5223    /* Write the dword the lazy way. */
5224 uint32_t *pu32Dst;
5225 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5226 if (rc == VINF_SUCCESS)
5227 {
5228 *pu32Dst = u32Value;
5229 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5230 }
5231
5232    /* Commit the new RSP value unless an access handler made trouble. */
5233 if (rc == VINF_SUCCESS)
5234 pCtx->rsp = uNewRsp;
5235
5236 return rc;
5237}
5238
5239
5240/**
5241 * Pushes a qword onto the stack.
5242 *
5243 * @returns Strict VBox status code.
5244 * @param pIemCpu The IEM per CPU data.
5245 * @param u64Value The value to push.
5246 */
5247static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5248{
5249    /* Decrement the stack pointer. */
5250 uint64_t uNewRsp;
5251 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5252 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5253
5254    /* Write the qword the lazy way. */
5255 uint64_t *pu64Dst;
5256 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5257 if (rc == VINF_SUCCESS)
5258 {
5259 *pu64Dst = u64Value;
5260 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5261 }
5262
5263    /* Commit the new RSP value unless an access handler made trouble. */
5264 if (rc == VINF_SUCCESS)
5265 pCtx->rsp = uNewRsp;
5266
5267 return rc;
5268}
5269
5270
5271/**
5272 * Pops a word from the stack.
5273 *
5274 * @returns Strict VBox status code.
5275 * @param pIemCpu The IEM per CPU data.
5276 * @param pu16Value Where to store the popped value.
5277 */
5278static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5279{
5280 /* Increment the stack pointer. */
5281 uint64_t uNewRsp;
5282 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5283 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5284
5285    /* Read the word the lazy way. */
5286 uint16_t const *pu16Src;
5287 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5288 if (rc == VINF_SUCCESS)
5289 {
5290 *pu16Value = *pu16Src;
5291 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5292
5293 /* Commit the new RSP value. */
5294 if (rc == VINF_SUCCESS)
5295 pCtx->rsp = uNewRsp;
5296 }
5297
5298 return rc;
5299}
5300
5301
5302/**
5303 * Pops a dword from the stack.
5304 *
5305 * @returns Strict VBox status code.
5306 * @param pIemCpu The IEM per CPU data.
5307 * @param pu32Value Where to store the popped value.
5308 */
5309static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5310{
5311 /* Increment the stack pointer. */
5312 uint64_t uNewRsp;
5313 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5314 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5315
5316    /* Read the dword the lazy way. */
5317 uint32_t const *pu32Src;
5318 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5319 if (rc == VINF_SUCCESS)
5320 {
5321 *pu32Value = *pu32Src;
5322 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5323
5324 /* Commit the new RSP value. */
5325 if (rc == VINF_SUCCESS)
5326 pCtx->rsp = uNewRsp;
5327 }
5328
5329 return rc;
5330}
5331
5332
5333/**
5334 * Pops a qword from the stack.
5335 *
5336 * @returns Strict VBox status code.
5337 * @param pIemCpu The IEM per CPU data.
5338 * @param pu64Value Where to store the popped value.
5339 */
5340static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5341{
5342 /* Increment the stack pointer. */
5343 uint64_t uNewRsp;
5344 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5345 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5346
5347    /* Read the qword the lazy way. */
5348 uint64_t const *pu64Src;
5349 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5350 if (rc == VINF_SUCCESS)
5351 {
5352 *pu64Value = *pu64Src;
5353 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5354
5355 /* Commit the new RSP value. */
5356 if (rc == VINF_SUCCESS)
5357 pCtx->rsp = uNewRsp;
5358 }
5359
5360 return rc;
5361}
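/*
 * Illustrative only: the push/pop helpers above all follow the same pattern of
 * calculating the new RSP, mapping the stack memory, and only committing RSP
 * when both the mapping and the unmap/commit succeed.  A hedged sketch of a
 * caller (u32ReturnAddr and u32Value are hypothetical locals, not taken from
 * the real decoder):
 *
 * @code
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, u32ReturnAddr);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;            // RSP is left unchanged if the push faulted.
 *
 *      uint32_t u32Value;
 *      rcStrict = iemMemStackPopU32(pIemCpu, &u32Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode
 */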
5362
5363
5364/**
5365 * Pushes a word onto the stack, using a temporary stack pointer.
5366 *
5367 * @returns Strict VBox status code.
5368 * @param pIemCpu The IEM per CPU data.
5369 * @param u16Value The value to push.
5370 * @param pTmpRsp Pointer to the temporary stack pointer.
5371 */
5372static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5373{
5374    /* Decrement the stack pointer. */
5375 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5376 RTUINT64U NewRsp = *pTmpRsp;
5377 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5378
5379 /* Write the word the lazy way. */
5380 uint16_t *pu16Dst;
5381 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5382 if (rc == VINF_SUCCESS)
5383 {
5384 *pu16Dst = u16Value;
5385 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5386 }
5387
5388    /* Commit the new RSP value unless an access handler made trouble. */
5389 if (rc == VINF_SUCCESS)
5390 *pTmpRsp = NewRsp;
5391
5392 return rc;
5393}
5394
5395
5396/**
5397 * Pushes a dword onto the stack, using a temporary stack pointer.
5398 *
5399 * @returns Strict VBox status code.
5400 * @param pIemCpu The IEM per CPU data.
5401 * @param u32Value The value to push.
5402 * @param pTmpRsp Pointer to the temporary stack pointer.
5403 */
5404static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5405{
5406    /* Decrement the stack pointer. */
5407 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5408 RTUINT64U NewRsp = *pTmpRsp;
5409 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5410
5411    /* Write the dword the lazy way. */
5412 uint32_t *pu32Dst;
5413 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5414 if (rc == VINF_SUCCESS)
5415 {
5416 *pu32Dst = u32Value;
5417 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5418 }
5419
5420    /* Commit the new RSP value unless an access handler made trouble. */
5421 if (rc == VINF_SUCCESS)
5422 *pTmpRsp = NewRsp;
5423
5424 return rc;
5425}
5426
5427
5428#ifdef SOME_UNUSED_FUNCTION
5429/**
5430 * Pushes a qword onto the stack, using a temporary stack pointer.
5431 *
5432 * @returns Strict VBox status code.
5433 * @param pIemCpu The IEM per CPU data.
5434 * @param u64Value The value to push.
5435 * @param pTmpRsp Pointer to the temporary stack pointer.
5436 */
5437static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5438{
5439    /* Decrement the stack pointer. */
5440 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5441 RTUINT64U NewRsp = *pTmpRsp;
5442 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5443
5444    /* Write the qword the lazy way. */
5445 uint64_t *pu64Dst;
5446 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5447 if (rc == VINF_SUCCESS)
5448 {
5449 *pu64Dst = u64Value;
5450 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5451 }
5452
5453    /* Commit the new RSP value unless an access handler made trouble. */
5454 if (rc == VINF_SUCCESS)
5455 *pTmpRsp = NewRsp;
5456
5457 return rc;
5458}
5459#endif
5460
5461
5462/**
5463 * Pops a word from the stack, using a temporary stack pointer.
5464 *
5465 * @returns Strict VBox status code.
5466 * @param pIemCpu The IEM per CPU data.
5467 * @param pu16Value Where to store the popped value.
5468 * @param pTmpRsp Pointer to the temporary stack pointer.
5469 */
5470static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5471{
5472 /* Increment the stack pointer. */
5473 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5474 RTUINT64U NewRsp = *pTmpRsp;
5475 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5476
5477    /* Read the word the lazy way. */
5478 uint16_t const *pu16Src;
5479 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5480 if (rc == VINF_SUCCESS)
5481 {
5482 *pu16Value = *pu16Src;
5483 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5484
5485 /* Commit the new RSP value. */
5486 if (rc == VINF_SUCCESS)
5487 *pTmpRsp = NewRsp;
5488 }
5489
5490 return rc;
5491}
5492
5493
5494/**
5495 * Pops a dword from the stack, using a temporary stack pointer.
5496 *
5497 * @returns Strict VBox status code.
5498 * @param pIemCpu The IEM per CPU data.
5499 * @param pu32Value Where to store the popped value.
5500 * @param pTmpRsp Pointer to the temporary stack pointer.
5501 */
5502static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5503{
5504 /* Increment the stack pointer. */
5505 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5506 RTUINT64U NewRsp = *pTmpRsp;
5507 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5508
5509    /* Read the dword the lazy way. */
5510 uint32_t const *pu32Src;
5511 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5512 if (rc == VINF_SUCCESS)
5513 {
5514 *pu32Value = *pu32Src;
5515 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5516
5517 /* Commit the new RSP value. */
5518 if (rc == VINF_SUCCESS)
5519 *pTmpRsp = NewRsp;
5520 }
5521
5522 return rc;
5523}
5524
5525
5526/**
5527 * Pops a qword from the stack, using a temporary stack pointer.
5528 *
5529 * @returns Strict VBox status code.
5530 * @param pIemCpu The IEM per CPU data.
5531 * @param pu64Value Where to store the popped value.
5532 * @param pTmpRsp Pointer to the temporary stack pointer.
5533 */
5534static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5535{
5536 /* Increment the stack pointer. */
5537 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5538 RTUINT64U NewRsp = *pTmpRsp;
5539 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5540
5541    /* Read the qword the lazy way. */
5542 uint64_t const *pu64Src;
5543 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5544 if (rcStrict == VINF_SUCCESS)
5545 {
5546 *pu64Value = *pu64Src;
5547 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5548
5549 /* Commit the new RSP value. */
5550 if (rcStrict == VINF_SUCCESS)
5551 *pTmpRsp = NewRsp;
5552 }
5553
5554 return rcStrict;
5555}
5556
5557
5558/**
5559 * Begin a special stack push (used by interrupts, exceptions and such).
5560 *
5561 * This will raise \#SS or \#PF if appropriate.
5562 *
5563 * @returns Strict VBox status code.
5564 * @param pIemCpu The IEM per CPU data.
5565 * @param cbMem The number of bytes to push onto the stack.
5566 * @param ppvMem Where to return the pointer to the stack memory.
5567 * As with the other memory functions this could be
5568 * direct access or bounce buffered access, so
5569 *                      don't commit register changes until the commit call
5570 * succeeds.
5571 * @param puNewRsp Where to return the new RSP value. This must be
5572 * passed unchanged to
5573 * iemMemStackPushCommitSpecial().
5574 */
5575static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
5576{
5577 Assert(cbMem < UINT8_MAX);
5578 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5579 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
5580 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5581}
5582
5583
5584/**
5585 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
5586 *
5587 * This will update the rSP.
5588 *
5589 * @returns Strict VBox status code.
5590 * @param pIemCpu The IEM per CPU data.
5591 * @param pvMem The pointer returned by
5592 * iemMemStackPushBeginSpecial().
5593 * @param uNewRsp The new RSP value returned by
5594 * iemMemStackPushBeginSpecial().
5595 */
5596static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
5597{
5598 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
5599 if (rcStrict == VINF_SUCCESS)
5600 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5601 return rcStrict;
5602}
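/*
 * Illustrative only: the special push API is a two-phase affair - begin maps
 * the frame and calculates the new RSP, the caller fills in the frame, and
 * commit unmaps the memory and updates RSP in one go.  A hedged sketch of a
 * real-mode style three word exception frame; u16Eflags, u16Cs and u16Ip are
 * hypothetical locals holding the values to save.
 *
 * @code
 *      uint64_t  uNewRsp;
 *      uint16_t *pu16Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[2] = u16Eflags;
 *      pu16Frame[1] = u16Cs;
 *      pu16Frame[0] = u16Ip;
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 * @endcode
 */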
5603
5604
5605/**
5606 * Begin a special stack pop (used by iret, retf and such).
5607 *
5608 * This will raise \#SS or \#PF if appropriate.
5609 *
5610 * @returns Strict VBox status code.
5611 * @param pIemCpu The IEM per CPU data.
5612 * @param   cbMem               The number of bytes to pop off the stack.
5613 * @param ppvMem Where to return the pointer to the stack memory.
5614 * @param puNewRsp Where to return the new RSP value. This must be
5615 * passed unchanged to
5616 * iemMemStackPopCommitSpecial() or applied
5617 * manually if iemMemStackPopDoneSpecial() is used.
5618 */
5619static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5620{
5621 Assert(cbMem < UINT8_MAX);
5622 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5623 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
5624 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5625}
5626
5627
5628/**
5629 * Continue a special stack pop (used by iret and retf).
5630 *
5631 * This will raise \#SS or \#PF if appropriate.
5632 *
5633 * @returns Strict VBox status code.
5634 * @param pIemCpu The IEM per CPU data.
5635 * @param   cbMem               The number of bytes to pop off the stack.
5636 * @param ppvMem Where to return the pointer to the stack memory.
5637 * @param puNewRsp Where to return the new RSP value. This must be
5638 * passed unchanged to
5639 * iemMemStackPopCommitSpecial() or applied
5640 * manually if iemMemStackPopDoneSpecial() is used.
5641 */
5642static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5643{
5644 Assert(cbMem < UINT8_MAX);
5645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5646 RTUINT64U NewRsp;
5647 NewRsp.u = *puNewRsp;
5648 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5649 *puNewRsp = NewRsp.u;
5650 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5651}
5652
5653
5654/**
5655 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
5656 *
5657 * This will update the rSP.
5658 *
5659 * @returns Strict VBox status code.
5660 * @param pIemCpu The IEM per CPU data.
5661 * @param pvMem The pointer returned by
5662 * iemMemStackPopBeginSpecial().
5663 * @param uNewRsp The new RSP value returned by
5664 * iemMemStackPopBeginSpecial().
5665 */
5666static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
5667{
5668 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5669 if (rcStrict == VINF_SUCCESS)
5670 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5671 return rcStrict;
5672}
5673
5674
5675/**
5676 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
5677 * iemMemStackPopContinueSpecial).
5678 *
5679 * The caller will manually commit the rSP.
5680 *
5681 * @returns Strict VBox status code.
5682 * @param pIemCpu The IEM per CPU data.
5683 * @param pvMem The pointer returned by
5684 * iemMemStackPopBeginSpecial() or
5685 * iemMemStackPopContinueSpecial().
5686 */
5687static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
5688{
5689 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5690}
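/*
 * Illustrative only: a hedged sketch of the begin/done pop flow, where the
 * caller applies the new RSP itself (as iret/retf style code must, since it
 * may still decide to raise an exception).  pu16Frame, uNewRsp and the uNew*
 * locals are hypothetical names for this sketch.
 *
 * @code
 *      uint64_t        uNewRsp;
 *      uint16_t const *pu16Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint16_t uNewIp    = pu16Frame[0];
 *      uint16_t uNewCs    = pu16Frame[1];
 *      uint16_t uNewFlags = pu16Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;     // manual commit of the stack pointer
 * @endcode
 */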
5691
5692
5693/**
5694 * Fetches a system table dword.
5695 *
5696 * @returns Strict VBox status code.
5697 * @param pIemCpu The IEM per CPU data.
5698 * @param pu32Dst Where to return the dword.
5699 * @param iSegReg The index of the segment register to use for
5700 * this access. The base and limits are checked.
5701 * @param GCPtrMem The address of the guest memory.
5702 */
5703static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5704{
5705 /* The lazy approach for now... */
5706 uint32_t const *pu32Src;
5707 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5708 if (rc == VINF_SUCCESS)
5709 {
5710 *pu32Dst = *pu32Src;
5711 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
5712 }
5713 return rc;
5714}
5715
5716
5717/**
5718 * Fetches a system table qword.
5719 *
5720 * @returns Strict VBox status code.
5721 * @param pIemCpu The IEM per CPU data.
5722 * @param pu64Dst Where to return the qword.
5723 * @param iSegReg The index of the segment register to use for
5724 * this access. The base and limits are checked.
5725 * @param GCPtrMem The address of the guest memory.
5726 */
5727static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5728{
5729 /* The lazy approach for now... */
5730 uint64_t const *pu64Src;
5731 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5732 if (rc == VINF_SUCCESS)
5733 {
5734 *pu64Dst = *pu64Src;
5735 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
5736 }
5737 return rc;
5738}
5739
5740
5741/**
5742 * Fetches a descriptor table entry.
5743 *
5744 * @returns Strict VBox status code.
5745 * @param pIemCpu The IEM per CPU.
5746 * @param pDesc Where to return the descriptor table entry.
5747 * @param uSel The selector which table entry to fetch.
5748 */
5749static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
5750{
5751 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5752
5753 /** @todo did the 286 require all 8 bytes to be accessible? */
5754 /*
5755 * Get the selector table base and check bounds.
5756 */
5757 RTGCPTR GCPtrBase;
5758 if (uSel & X86_SEL_LDT)
5759 {
5760 if ( !pCtx->ldtrHid.Attr.n.u1Present
5761 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
5762 {
5763 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
5764 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
5765 /** @todo is this the right exception? */
5766 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5767 }
5768
5769 Assert(pCtx->ldtrHid.Attr.n.u1Present);
5770 GCPtrBase = pCtx->ldtrHid.u64Base;
5771 }
5772 else
5773 {
5774 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
5775 {
5776 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
5777 /** @todo is this the right exception? */
5778 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5779 }
5780 GCPtrBase = pCtx->gdtr.pGdt;
5781 }
5782
5783 /*
5784 * Read the legacy descriptor and maybe the long mode extensions if
5785 * required.
5786 */
5787 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5788 if (rcStrict == VINF_SUCCESS)
5789 {
5790 if ( !IEM_IS_LONG_MODE(pIemCpu)
5791 || pDesc->Legacy.Gen.u1DescType)
5792 pDesc->Long.au64[1] = 0;
5793 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
5794            rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); /* the high 8 bytes of the 16-byte entry */
5795 else
5796 {
5797 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
5798 /** @todo is this the right exception? */
5799 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5800 }
5801 }
5802 return rcStrict;
5803}
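/*
 * Worked example of the bounds check above (illustrative numbers): a selector
 * of 0x0023 has TI=0 (GDT) and RPL=3, so the descriptor entry starts at
 * offset 0x20 = (0x0023 & X86_SEL_MASK), and the check (0x0023 | 7) > cbGdt
 * simply asks whether the last byte of the 8-byte entry, offset 0x27, lies
 * beyond the GDT limit.
 */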
5804
5805
5806/**
5807 * Fakes a long mode stack selector for SS = 0.
5808 *
5809 * @param pDescSs Where to return the fake stack descriptor.
5810 * @param uDpl The DPL we want.
5811 */
5812static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
5813{
5814 pDescSs->Long.au64[0] = 0;
5815 pDescSs->Long.au64[1] = 0;
5816 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
5817 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
5818 pDescSs->Long.Gen.u2Dpl = uDpl;
5819 pDescSs->Long.Gen.u1Present = 1;
5820 pDescSs->Long.Gen.u1Long = 1;
5821}
5822
5823
5824/**
5825 * Marks the selector descriptor as accessed (only non-system descriptors).
5826 *
5827 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
5828 * will therefore skip the limit checks.
5829 *
5830 * @returns Strict VBox status code.
5831 * @param pIemCpu The IEM per CPU.
5832 * @param uSel The selector.
5833 */
5834static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
5835{
5836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5837
5838 /*
5839 * Get the selector table base and calculate the entry address.
5840 */
5841 RTGCPTR GCPtr = uSel & X86_SEL_LDT
5842 ? pCtx->ldtrHid.u64Base
5843 : pCtx->gdtr.pGdt;
5844 GCPtr += uSel & X86_SEL_MASK;
5845
5846 /*
5847 * ASMAtomicBitSet will assert if the address is misaligned, so do some
5848     * ugly stuff to avoid this. This also makes sure the access is atomic and
5849     * more or less removes any question about 8-bit vs 32-bit accesses.
5850 */
5851 VBOXSTRICTRC rcStrict;
5852 uint32_t volatile *pu32;
5853 if ((GCPtr & 3) == 0)
5854 {
5855        /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
5856 GCPtr += 2 + 2;
5857 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5858 if (rcStrict != VINF_SUCCESS)
5859 return rcStrict;
5860        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
5861 }
5862 else
5863 {
5864 /* The misaligned GDT/LDT case, map the whole thing. */
5865 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5866 if (rcStrict != VINF_SUCCESS)
5867 return rcStrict;
5868 switch ((uintptr_t)pu32 & 3)
5869 {
5870 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
5871 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
5872 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
5873 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
5874 }
5875 }
5876
5877 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
5878}
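/*
 * Worked example of the bit arithmetic above (illustrative): the accessed bit
 * is bit 40 of the 8-byte descriptor entry, i.e. bit 0 of byte 5.  In the
 * aligned case the mapping starts at byte 4 (GCPtr + 2 + 2), so the bit sits
 * at index 8 of the mapped dword.  In the misaligned case the whole entry is
 * mapped and the byte pointer is first advanced until it is dword aligned
 * again; each byte of advance knocks 8 off the bit index (e.g. a pointer
 * ending in ...01 is advanced 3 bytes, so bit 40 becomes bit 40 - 24 = 16).
 */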
5879
5880/** @} */
5881
5882
5883/*
5884 * Include the C/C++ implementation of instructions.
5885 */
5886#include "IEMAllCImpl.cpp.h"
5887
5888
5889
5890/** @name "Microcode" macros.
5891 *
5892 * The idea is that we should be able to use the same code to interpret
5893 * instructions as well as to recompile them. Thus this obfuscation.
5894 *
5895 * @{
5896 */
5897#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
5898#define IEM_MC_END() }
5899#define IEM_MC_PAUSE() do {} while (0)
5900#define IEM_MC_CONTINUE() do {} while (0)
5901
5902/** Internal macro. */
5903#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5904 do \
5905 { \
5906 VBOXSTRICTRC rcStrict2 = a_Expr; \
5907 if (rcStrict2 != VINF_SUCCESS) \
5908 return rcStrict2; \
5909 } while (0)
5910
5911#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5912#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5913#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5914#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5915#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5916#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5917#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5918
5919#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5920#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
5921 do { \
5922 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
5923 return iemRaiseDeviceNotAvailable(pIemCpu); \
5924 } while (0)
5925#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
5926 do { \
5927 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
5928 return iemRaiseMathFault(pIemCpu); \
5929 } while (0)
5930#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
5931 do { \
5932 if (pIemCpu->uCpl != 0) \
5933 return iemRaiseGeneralProtectionFault0(pIemCpu); \
5934 } while (0)
5935
5936
5937#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5938#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5939#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5940#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5941#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5942#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
5943#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5944 uint32_t a_Name; \
5945 uint32_t *a_pName = &a_Name
5946#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
5947 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
5948
5949#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5950#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
5951
5952#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5953#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5954#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5955#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5956#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5957#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5958#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5959#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5960#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5961#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5962#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5963#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5964#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5965#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5966#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
5967#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5968#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
5969#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5970#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5971#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5972#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5973#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5974#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
5975#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5976#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5977#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
5978#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
5979
5980#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
5981#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
5982#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
5983#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
5984#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
5985#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
5986#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
5987#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
5988#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
5989#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
5990#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
5991 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
5992
5993#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
5994#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
5995/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
5996 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
5997#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
5998#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
5999#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6000
6001#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6002#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6003#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6004 do { \
6005 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6006 *pu32Reg += (a_u32Value); \
6007        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6008 } while (0)
6009#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6010
6011#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6012#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6013#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6014 do { \
6015 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6016 *pu32Reg -= (a_u32Value); \
6017        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6018 } while (0)
6019#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6020
6021#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6022#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6023#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6024#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6025#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6026#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6027#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6028
6029#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6030#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6031#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6032#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6033
6034#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6035#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6036#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6037
6038#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6039#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6040
6041#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6042#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6043#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6044
6045#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6046#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6047#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6048
6049#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6050
6051#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6052
6053#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6054#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6055#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6056 do { \
6057 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6058 *pu32Reg &= (a_u32Value); \
6059        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6060 } while (0)
6061#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6062
6063#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6064#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6065#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6066 do { \
6067 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6068 *pu32Reg |= (a_u32Value); \
6069        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6070 } while (0)
6071#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6072
6073
6074#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6075#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6076#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6077
6078#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6079
6080
6081#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6082 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6083#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6084 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6085#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6086 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6087
6088#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6089 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6090#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6091 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6092#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6093 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6094
6095#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6096 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6097#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6098 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6099#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6100 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6101
6102#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6103 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6104
6105#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6106 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6107#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6108 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6109
6110#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6111 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6112#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6113 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6114#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6115 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6116
6117
6118#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6119 do { \
6120 uint8_t u8Tmp; \
6121 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6122 (a_u16Dst) = u8Tmp; \
6123 } while (0)
6124#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6125 do { \
6126 uint8_t u8Tmp; \
6127 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6128 (a_u32Dst) = u8Tmp; \
6129 } while (0)
6130#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6131 do { \
6132 uint8_t u8Tmp; \
6133 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6134 (a_u64Dst) = u8Tmp; \
6135 } while (0)
6136#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6137 do { \
6138 uint16_t u16Tmp; \
6139 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6140 (a_u32Dst) = u16Tmp; \
6141 } while (0)
6142#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6143 do { \
6144 uint16_t u16Tmp; \
6145 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6146 (a_u64Dst) = u16Tmp; \
6147 } while (0)
6148#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6149 do { \
6150 uint32_t u32Tmp; \
6151 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6152 (a_u64Dst) = u32Tmp; \
6153 } while (0)
6154
6155#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6156 do { \
6157 uint8_t u8Tmp; \
6158 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6159 (a_u16Dst) = (int8_t)u8Tmp; \
6160 } while (0)
6161#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6162 do { \
6163 uint8_t u8Tmp; \
6164 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6165 (a_u32Dst) = (int8_t)u8Tmp; \
6166 } while (0)
6167#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6168 do { \
6169 uint8_t u8Tmp; \
6170 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6171 (a_u64Dst) = (int8_t)u8Tmp; \
6172 } while (0)
6173#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6174 do { \
6175 uint16_t u16Tmp; \
6176 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6177 (a_u32Dst) = (int16_t)u16Tmp; \
6178 } while (0)
6179#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6180 do { \
6181 uint16_t u16Tmp; \
6182 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6183 (a_u64Dst) = (int16_t)u16Tmp; \
6184 } while (0)
6185#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6186 do { \
6187 uint32_t u32Tmp; \
6188 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6189 (a_u64Dst) = (int32_t)u32Tmp; \
6190 } while (0)
6191
6192#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6193 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6194#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6195 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6196#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6197 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6198#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6199 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6200
6201#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6202 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6203#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6204 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6205#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6206 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6207#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6208 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6209
6210#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6211#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6212#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6213#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6214#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6215#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6216#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6217 do { \
6218 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6219 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6220 } while (0)
6221
6222
6223#define IEM_MC_PUSH_U16(a_u16Value) \
6224 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6225#define IEM_MC_PUSH_U32(a_u32Value) \
6226 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6227#define IEM_MC_PUSH_U64(a_u64Value) \
6228 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6229
6230#define IEM_MC_POP_U16(a_pu16Value) \
6231 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6232#define IEM_MC_POP_U32(a_pu32Value) \
6233 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6234#define IEM_MC_POP_U64(a_pu64Value) \
6235 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
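/*
 * Illustrative only: a hedged sketch of how a decoder function might combine
 * the microcode macros above into an instruction body (roughly a "push ax"
 * shape).  This is not lifted from the real opcode tables; X86_GREG_xAX is
 * assumed to be the usual general register index constant from x86.h.
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */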
6236
6237/** Maps guest memory for direct or bounce buffered access.
6238 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6239 * @remarks May return.
6240 */
6241#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6242 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6243
6244/** Maps guest memory for direct or bounce buffered access.
6245 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6246 * @remarks May return.
6247 */
6248#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6249 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6250
6251/** Commits the memory and unmaps the guest memory.
6252 * @remarks May return.
6253 */
6254#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6255 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
6256
6257/** Commits the memory and unmaps the guest memory unless the FPU status word
6258 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
6259 * that would cause FLD not to store.
6260 *
6261 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6262 * store, while \#P will not.
6263 *
6264 * @remarks May in theory return - for now.
6265 */
6266#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6267 do { \
6268 if ( !(a_u16FSW & X86_FSW_ES) \
6269 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6270 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6271 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6272 } while (0)
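/*
 * Worked example of the condition above (illustrative values): with
 * a_u16FSW = X86_FSW_ES | X86_FSW_IE and an FCW that masks all exceptions
 * (FCW & X86_FCW_MASK_ALL == X86_FCW_MASK_ALL), the inner AND against the
 * inverted mask is zero, so the store is still committed.  The commit is
 * skipped only when ES is set and one of the pending underflow, overflow or
 * invalid-operation exceptions is unmasked in the FCW.
 */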
6273
6274/** Calculate the effective address from R/M. */
6275#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6276 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6277
6278#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6279#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6280#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6281#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6282#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6283
6284/**
6285 * Defers the rest of the instruction emulation to a C implementation routine
6286 * and returns, only taking the standard parameters.
6287 *
6288 * @param a_pfnCImpl The pointer to the C routine.
6289 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6290 */
6291#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6292
6293/**
6294 * Defers the rest of instruction emulation to a C implementation routine and
6295 * returns, taking one argument in addition to the standard ones.
6296 *
6297 * @param a_pfnCImpl The pointer to the C routine.
6298 * @param a0 The argument.
6299 */
6300#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6301
6302/**
6303 * Defers the rest of the instruction emulation to a C implementation routine
6304 * and returns, taking two arguments in addition to the standard ones.
6305 *
6306 * @param a_pfnCImpl The pointer to the C routine.
6307 * @param a0 The first extra argument.
6308 * @param a1 The second extra argument.
6309 */
6310#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6311
6312/**
6313 * Defers the rest of the instruction emulation to a C implementation routine
6314 * and returns, taking three arguments in addition to the standard ones.
6315 *
6316 * @param a_pfnCImpl The pointer to the C routine.
6317 * @param a0 The first extra argument.
6318 * @param a1 The second extra argument.
6319 * @param a2 The third extra argument.
6320 */
6321#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6322
6323/**
6324 * Defers the rest of the instruction emulation to a C implementation routine
6325 * and returns, taking five arguments in addition to the standard ones.
6326 *
6327 * @param a_pfnCImpl The pointer to the C routine.
6328 * @param a0 The first extra argument.
6329 * @param a1 The second extra argument.
6330 * @param a2 The third extra argument.
6331 * @param a3 The fourth extra argument.
6332 * @param a4 The fifth extra argument.
6333 */
6334#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
6335
6336/**
6337 * Defers the entire instruction emulation to a C implementation routine and
6338 * returns, only taking the standard parameters.
6339 *
6340 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6341 *
6342 * @param a_pfnCImpl The pointer to the C routine.
6343 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6344 */
6345#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6346
6347/**
6348 * Defers the entire instruction emulation to a C implementation routine and
6349 * returns, taking one argument in addition to the standard ones.
6350 *
6351 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6352 *
6353 * @param a_pfnCImpl The pointer to the C routine.
6354 * @param a0 The argument.
6355 */
6356#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6357
6358/**
6359 * Defers the entire instruction emulation to a C implementation routine and
6360 * returns, taking two arguments in addition to the standard ones.
6361 *
6362 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6363 *
6364 * @param a_pfnCImpl The pointer to the C routine.
6365 * @param a0 The first extra argument.
6366 * @param a1 The second extra argument.
6367 */
6368#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6369
6370/**
6371 * Defers the entire instruction emulation to a C implementation routine and
6372 * returns, taking three arguments in addition to the standard ones.
6373 *
6374 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6375 *
6376 * @param a_pfnCImpl The pointer to the C routine.
6377 * @param a0 The first extra argument.
6378 * @param a1 The second extra argument.
6379 * @param a2 The third extra argument.
6380 */
6381#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
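/*
 * Illustrative only: a hedged sketch of deferring a whole instruction to a C
 * worker.  iemCImpl_SomeInsn is a made-up name standing in for one of the
 * IEM_CIMPL_DEF_* workers defined in IEMAllCImpl.cpp.h.
 *
 * @code
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeInsn);
 * @endcode
 */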
6382
6383/**
6384 * Calls a FPU assembly implementation taking one visible argument.
6385 *
6386 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6387 * @param a0 The first extra argument.
6388 */
6389#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6390 do { \
6391 iemFpuPrepareUsage(pIemCpu); \
6392 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6393 } while (0)
6394
6395/**
6396 * Calls a FPU assembly implementation taking two visible arguments.
6397 *
6398 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6399 * @param a0 The first extra argument.
6400 * @param a1 The second extra argument.
6401 */
6402#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6403 do { \
6404 iemFpuPrepareUsage(pIemCpu); \
6405 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6406 } while (0)
6407
6408/**
6409 * Calls a FPU assembly implementation taking three visible arguments.
6410 *
6411 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6412 * @param a0 The first extra argument.
6413 * @param a1 The second extra argument.
6414 * @param a2 The third extra argument.
6415 */
6416#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6417 do { \
6418 iemFpuPrepareUsage(pIemCpu); \
6419 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6420 } while (0)
6421
6422#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6423 do { \
6424 (a_FpuData).FSW = (a_FSW); \
6425 (a_FpuData).r80Result = *(a_pr80Value); \
6426 } while (0)
6427
6428/** Pushes FPU result onto the stack. */
6429#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6430 iemFpuPushResult(pIemCpu, &a_FpuData)
6431/** Pushes FPU result onto the stack and sets the FPUDP. */
6432#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6433 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6434
6435/** Replaces ST0 with the first result value and pushes the second value onto the FPU stack. */
6436#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6437 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6438
6439/** Stores FPU result in a stack register. */
6440#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6441 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6442/** Stores FPU result in a stack register and pops the stack. */
6443#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6444 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6445/** Stores FPU result in a stack register and sets the FPUDP. */
6446#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6447 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6448/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6449 * stack. */
6450#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6451 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6452
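/* Purely illustrative sketch of how the FPU call and result macros above are
 * typically strung together inside an IEM_MC_BEGIN/IEM_MC_END block; the
 * worker name iemAImpl_fadd_r80_by_r80 follows the usual naming pattern but
 * is quoted here only as an example. */
#if 0
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
#endif
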
6453/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6454#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6455 iemFpuUpdateOpcodeAndIp(pIemCpu)
6456/** Free a stack register (for FFREE and FFREEP). */
6457#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6458 iemFpuStackFree(pIemCpu, a_iStReg)
6459/** Increment the FPU stack pointer. */
6460#define IEM_MC_FPU_STACK_INC_TOP() \
6461 iemFpuStackIncTop(pIemCpu)
6462/** Decrement the FPU stack pointer. */
6463#define IEM_MC_FPU_STACK_DEC_TOP() \
6464 iemFpuStackDecTop(pIemCpu)
6465
6466/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6467#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6468 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6469/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6470#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6471 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6472/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6473#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6474 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6475/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6476#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6477 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6478/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6479 * stack. */
6480#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6481 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6482/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6483#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6484 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6485
6486/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6487#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6488 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6489/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6490 * stack. */
6491#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6492 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6493/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6494 * FPUDS. */
6495#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6496 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6497/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6498 * FPUDS. Pops stack. */
6499#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6500 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6501/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6502 * stack twice. */
6503#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6504 iemFpuStackUnderflowThenPopPop(pIemCpu)
6505/** Raises a FPU stack underflow exception for an instruction pushing a result
6506 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6507#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6508 iemFpuStackPushUnderflow(pIemCpu)
6509/** Raises a FPU stack underflow exception for an instruction pushing a result
6510 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6511#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6512 iemFpuStackPushUnderflowTwo(pIemCpu)
6513
6514/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6515 * FPUIP, FPUCS and FOP. */
6516#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6517 iemFpuStackPushOverflow(pIemCpu)
6518/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6519 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6520#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6521 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6522
6523#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6524#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6525#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6526#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
6527#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
6528 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6529 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6530#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
6531 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6532 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6533#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
6534 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6535 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6536 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6537#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
6538 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
6539 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6540 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6541#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
6542#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
6543#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
6544#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6545 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6546 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6547#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6548 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
6549 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6550#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
6551 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
6552 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6553#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6554 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
6555 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6556#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6557 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
6558 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6559#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
6560 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
6561 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
6562#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
6563#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
6564#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
6565 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
6566#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
6567 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
6568#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
6569 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
6570#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
6571 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
6572#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
6573 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
6574#define IEM_MC_IF_FCW_IM() \
6575 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
6576
6577#define IEM_MC_ELSE() } else {
6578#define IEM_MC_ENDIF() } do {} while (0)
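
/* Purely illustrative sketch: the IF/ELSE/ENDIF macros above open and close
 * plain C blocks, so a conditional body in the instruction tables reads like
 * ordinary structured code (Jcc-style example, local names hypothetical): */
#if 0
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
#endif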
6579
6580/** @} */
6581
6582
6583/** @name Opcode Debug Helpers.
6584 * @{
6585 */
6586#ifdef DEBUG
6587# define IEMOP_MNEMONIC(a_szMnemonic) \
6588 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
6589 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
6590# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
6591 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
6592 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
6593#else
6594# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
6595# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
6596#endif
6597
6598/** @} */
6599
6600
6601/** @name Opcode Helpers.
6602 * @{
6603 */
6604
6605/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
6606 * lock prefixed.
6607 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
6608#define IEMOP_HLP_NO_LOCK_PREFIX() \
6609 do \
6610 { \
6611 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
6612 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
6613 } while (0)
6614
6615/** The instruction is not available in 64-bit mode; raises \#UD if we're in
6616 * 64-bit mode. */
6617#define IEMOP_HLP_NO_64BIT() \
6618 do \
6619 { \
6620 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6621 return IEMOP_RAISE_INVALID_OPCODE(); \
6622 } while (0)
6623
6624/** The instruction defaults to 64-bit operand size in 64-bit mode. */
6625#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
6626 do \
6627 { \
6628 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
6629 iemRecalEffOpSize64Default(pIemCpu); \
6630 } while (0)
6631
6632/**
6633 * Done decoding.
6634 */
6635#define IEMOP_HLP_DONE_DECODING() \
6636 do \
6637 { \
6638 /*nothing for now, maybe later... */ \
6639 } while (0)
6640
6641/**
6642 * Done decoding, raise \#UD exception if lock prefix present.
6643 */
6644#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
6645 do \
6646 { \
6647 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
6648 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
6649 } while (0)
6650
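/* Purely illustrative sketch of a decoder prologue built from the helpers
 * above; the opcode name is hypothetical and the real work is elided. */
#if 0
FNIEMOP_DEF(iemOp_examplePrologue)
{
    IEMOP_MNEMONIC("example");
    IEMOP_HLP_NO_64BIT();                     /* instruction is invalid in long mode */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /* reject a stray F0h prefix */
    return VINF_SUCCESS;                      /* actual emulation would go here */
}
#endif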
6651
6652/**
6653 * Calculates the effective address of a ModR/M memory operand.
6654 *
6655 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
6656 *
6657 * @return Strict VBox status code.
6658 * @param pIemCpu The IEM per CPU data.
6659 * @param bRm The ModRM byte.
6660 * @param pGCPtrEff Where to return the effective address.
6661 */
6662static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
6663{
6664 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
6665 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6666#define SET_SS_DEF() \
6667 do \
6668 { \
6669 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
6670 pIemCpu->iEffSeg = X86_SREG_SS; \
6671 } while (0)
6672
6673/** @todo Check the effective address size crap! */
6674 switch (pIemCpu->enmEffAddrMode)
6675 {
6676 case IEMMODE_16BIT:
6677 {
6678 uint16_t u16EffAddr;
6679
6680 /* Handle the disp16 form with no registers first. */
6681 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
6682 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
6683 else
6684 {
6685 /* Get the displacement. */
6686 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6687 {
6688 case 0: u16EffAddr = 0; break;
6689 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
6690 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
6691 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
6692 }
6693
6694 /* Add the base and index registers to the disp. */
6695 switch (bRm & X86_MODRM_RM_MASK)
6696 {
6697 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
6698 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
6699 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
6700 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
6701 case 4: u16EffAddr += pCtx->si; break;
6702 case 5: u16EffAddr += pCtx->di; break;
6703 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
6704 case 7: u16EffAddr += pCtx->bx; break;
6705 }
6706 }
6707
6708 *pGCPtrEff = u16EffAddr;
6709 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
6710 return VINF_SUCCESS;
6711 }
6712
6713 case IEMMODE_32BIT:
6714 {
6715 uint32_t u32EffAddr;
6716
6717 /* Handle the disp32 form with no registers first. */
6718 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
6719 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
6720 else
6721 {
6722 /* Get the register (or SIB) value. */
6723 switch ((bRm & X86_MODRM_RM_MASK))
6724 {
6725 case 0: u32EffAddr = pCtx->eax; break;
6726 case 1: u32EffAddr = pCtx->ecx; break;
6727 case 2: u32EffAddr = pCtx->edx; break;
6728 case 3: u32EffAddr = pCtx->ebx; break;
6729 case 4: /* SIB */
6730 {
6731 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
6732
6733 /* Get the index and scale it. */
6734 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
6735 {
6736 case 0: u32EffAddr = pCtx->eax; break;
6737 case 1: u32EffAddr = pCtx->ecx; break;
6738 case 2: u32EffAddr = pCtx->edx; break;
6739 case 3: u32EffAddr = pCtx->ebx; break;
6740 case 4: u32EffAddr = 0; /*none */ break;
6741 case 5: u32EffAddr = pCtx->ebp; break;
6742 case 6: u32EffAddr = pCtx->esi; break;
6743 case 7: u32EffAddr = pCtx->edi; break;
6744 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6745 }
6746 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6747
6748 /* add base */
6749 switch (bSib & X86_SIB_BASE_MASK)
6750 {
6751 case 0: u32EffAddr += pCtx->eax; break;
6752 case 1: u32EffAddr += pCtx->ecx; break;
6753 case 2: u32EffAddr += pCtx->edx; break;
6754 case 3: u32EffAddr += pCtx->ebx; break;
6755 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
6756 case 5:
6757 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6758 {
6759 u32EffAddr += pCtx->ebp;
6760 SET_SS_DEF();
6761 }
6762 else
6763 {
6764 uint32_t u32Disp;
6765 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6766 u32EffAddr += u32Disp;
6767 }
6768 break;
6769 case 6: u32EffAddr += pCtx->esi; break;
6770 case 7: u32EffAddr += pCtx->edi; break;
6771 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6772 }
6773 break;
6774 }
6775 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
6776 case 6: u32EffAddr = pCtx->esi; break;
6777 case 7: u32EffAddr = pCtx->edi; break;
6778 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6779 }
6780
6781 /* Get and add the displacement. */
6782 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6783 {
6784 case 0:
6785 break;
6786 case 1:
6787 {
6788 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6789 u32EffAddr += i8Disp;
6790 break;
6791 }
6792 case 2:
6793 {
6794 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6795 u32EffAddr += u32Disp;
6796 break;
6797 }
6798 default:
6799 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
6800 }
6801
6802 }
6803 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
6804 *pGCPtrEff = u32EffAddr;
6805 else
6806 {
6807 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
6808 *pGCPtrEff = u32EffAddr & UINT16_MAX;
6809 }
6810 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6811 return VINF_SUCCESS;
6812 }
6813
6814 case IEMMODE_64BIT:
6815 {
6816 uint64_t u64EffAddr;
6817
6818 /* Handle the rip+disp32 form with no registers first. */
6819 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
6820 {
6821 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
6822 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
6823 }
6824 else
6825 {
6826 /* Get the register (or SIB) value. */
6827 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
6828 {
6829 case 0: u64EffAddr = pCtx->rax; break;
6830 case 1: u64EffAddr = pCtx->rcx; break;
6831 case 2: u64EffAddr = pCtx->rdx; break;
6832 case 3: u64EffAddr = pCtx->rbx; break;
6833 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
6834 case 6: u64EffAddr = pCtx->rsi; break;
6835 case 7: u64EffAddr = pCtx->rdi; break;
6836 case 8: u64EffAddr = pCtx->r8; break;
6837 case 9: u64EffAddr = pCtx->r9; break;
6838 case 10: u64EffAddr = pCtx->r10; break;
6839 case 11: u64EffAddr = pCtx->r11; break;
6840 case 13: u64EffAddr = pCtx->r13; break;
6841 case 14: u64EffAddr = pCtx->r14; break;
6842 case 15: u64EffAddr = pCtx->r15; break;
6843 /* SIB */
6844 case 4:
6845 case 12:
6846 {
6847 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
6848
6849 /* Get the index and scale it. */
6850 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
6851 {
6852 case 0: u64EffAddr = pCtx->rax; break;
6853 case 1: u64EffAddr = pCtx->rcx; break;
6854 case 2: u64EffAddr = pCtx->rdx; break;
6855 case 3: u64EffAddr = pCtx->rbx; break;
6856 case 4: u64EffAddr = 0; /*none */ break;
6857 case 5: u64EffAddr = pCtx->rbp; break;
6858 case 6: u64EffAddr = pCtx->rsi; break;
6859 case 7: u64EffAddr = pCtx->rdi; break;
6860 case 8: u64EffAddr = pCtx->r8; break;
6861 case 9: u64EffAddr = pCtx->r9; break;
6862 case 10: u64EffAddr = pCtx->r10; break;
6863 case 11: u64EffAddr = pCtx->r11; break;
6864 case 12: u64EffAddr = pCtx->r12; break;
6865 case 13: u64EffAddr = pCtx->r13; break;
6866 case 14: u64EffAddr = pCtx->r14; break;
6867 case 15: u64EffAddr = pCtx->r15; break;
6868 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6869 }
6870 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6871
6872 /* add base */
6873 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
6874 {
6875 case 0: u64EffAddr += pCtx->rax; break;
6876 case 1: u64EffAddr += pCtx->rcx; break;
6877 case 2: u64EffAddr += pCtx->rdx; break;
6878 case 3: u64EffAddr += pCtx->rbx; break;
6879 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
6880 case 6: u64EffAddr += pCtx->rsi; break;
6881 case 7: u64EffAddr += pCtx->rdi; break;
6882 case 8: u64EffAddr += pCtx->r8; break;
6883 case 9: u64EffAddr += pCtx->r9; break;
6884 case 10: u64EffAddr += pCtx->r10; break;
6885 case 11: u64EffAddr += pCtx->r11; break;
 case 12: u64EffAddr += pCtx->r12; break;
6886 case 14: u64EffAddr += pCtx->r14; break;
6887 case 15: u64EffAddr += pCtx->r15; break;
6888 /* complicated encodings */
6889 case 5:
6890 case 13:
6891 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6892 {
6893 if (!pIemCpu->uRexB)
6894 {
6895 u64EffAddr += pCtx->rbp;
6896 SET_SS_DEF();
6897 }
6898 else
6899 u64EffAddr += pCtx->r13;
6900 }
6901 else
6902 {
6903 uint32_t u32Disp;
6904 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6905 u64EffAddr += (int32_t)u32Disp;
6906 }
6907 break;
6908 }
6909 break;
6910 }
6911 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6912 }
6913
6914 /* Get and add the displacement. */
6915 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6916 {
6917 case 0:
6918 break;
6919 case 1:
6920 {
6921 int8_t i8Disp;
6922 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6923 u64EffAddr += i8Disp;
6924 break;
6925 }
6926 case 2:
6927 {
6928 uint32_t u32Disp;
6929 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6930 u64EffAddr += (int32_t)u32Disp;
6931 break;
6932 }
6933 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
6934 }
6935
6936 }
6937 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
6938 *pGCPtrEff = u64EffAddr;
6939 else
6940 *pGCPtrEff = u64EffAddr & UINT32_MAX; /* (only a 32-bit address size override is possible in long mode) */
6941 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6942 return VINF_SUCCESS;
6943 }
6944 }
6945
6946 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6947}
6948
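/* Worked example for illustration: with 16-bit addressing, bRm = 0x46 decodes
 * as mod=01, rm=110, i.e. disp8[bp]; the routine above fetches the 8-bit
 * displacement, sign-extends it to 16 bits, adds BP, and SET_SS_DEF() makes
 * SS the default segment unless a segment prefix overrides it. */
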
6949/** @} */
6950
6951
6952
6953/*
6954 * Include the instructions
6955 */
6956#include "IEMAllInstructions.cpp.h"
6957
6958
6959
6960
6961#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6962
6963/**
6964 * Sets up execution verification mode.
6965 */
6966static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
6967{
6968 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6969 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
6970
6971 /*
6972 * Enable verification and/or logging.
6973 */
6974 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
6975 if ( pIemCpu->fNoRem
6976#if 0 /* auto enable on first paged protected mode interrupt */
6977 && pOrgCtx->eflags.Bits.u1IF
6978 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
6979 && TRPMHasTrap(pVCpu)
6980 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6981#endif
6982#if 0
6983 && pOrgCtx->cs == 0x10
6984 && ( pOrgCtx->rip == 0x90119e3e
6985 || pOrgCtx->rip == 0x901d9810
6986 )
6987#endif
6988#if 0 /* Auto enable DSL - FPU stuff. */
6989 && pOrgCtx->cs == 0x10
6990 && (// pOrgCtx->rip == 0xc02ec07f
6991 //|| pOrgCtx->rip == 0xc02ec082
6992 //|| pOrgCtx->rip == 0xc02ec0c9
6993 0
6994 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */
6995 )
6996#endif
6997#if 1 /* Auto enable DSL - fstp st0 stuff. */
6998 && pOrgCtx->cs == 0x23
6999 && pOrgCtx->rip == 0x804aff7
7000#endif
7001#if 0
7002 && pOrgCtx->rip == 0x9022bb3a
7003#endif
7004#if 0
7005 && 0
7006#endif
7007 )
7008 {
7009 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7010 RTLogFlags(NULL, "enabled");
7011 pIemCpu->fNoRem = false;
7012 }
7013
7014 /*
7015 * Switch state.
7016 */
7017 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7018 {
7019 static CPUMCTX s_DebugCtx; /* Ugly! */
7020
7021 s_DebugCtx = *pOrgCtx;
7022 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7023 }
7024
7025 /*
7026 * See if there is an interrupt pending in TRPM and inject it if we can.
7027 */
7028 if ( pOrgCtx->eflags.Bits.u1IF
7029 && TRPMHasTrap(pVCpu)
7030 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7031 {
7032 uint8_t u8TrapNo;
7033 TRPMEVENT enmType;
7034 RTGCUINT uErrCode;
7035 RTGCPTR uCr2;
7036 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
7037 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7038 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7039 TRPMResetTrap(pVCpu);
7040 }
7041
7042 /*
7043 * Reset the counters.
7044 */
7045 pIemCpu->cIOReads = 0;
7046 pIemCpu->cIOWrites = 0;
7047 pIemCpu->fUndefinedEFlags = 0;
7048
7049 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7050 {
7051 /*
7052 * Free all verification records.
7053 */
7054 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7055 pIemCpu->pIemEvtRecHead = NULL;
7056 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7057 do
7058 {
7059 while (pEvtRec)
7060 {
7061 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7062 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7063 pIemCpu->pFreeEvtRec = pEvtRec;
7064 pEvtRec = pNext;
7065 }
7066 pEvtRec = pIemCpu->pOtherEvtRecHead;
7067 pIemCpu->pOtherEvtRecHead = NULL;
7068 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7069 } while (pEvtRec);
7070 }
7071}
7072
7073
7074/**
7075 * Allocate an event record.
7076 * @returns Pointer to a record.
7077 */
7078static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7079{
7080 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7081 return NULL;
7082
7083 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7084 if (pEvtRec)
7085 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7086 else
7087 {
7088 if (!pIemCpu->ppIemEvtRecNext)
7089 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7090
7091 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7092 if (!pEvtRec)
7093 return NULL;
7094 }
7095 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7096 pEvtRec->pNext = NULL;
7097 return pEvtRec;
7098}
7099
7100
7101/**
7102 * IOMMMIORead notification.
7103 */
7104VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7105{
7106 PVMCPU pVCpu = VMMGetCpu(pVM);
7107 if (!pVCpu)
7108 return;
7109 PIEMCPU pIemCpu = &pVCpu->iem.s;
7110 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7111 if (!pEvtRec)
7112 return;
7113 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7114 pEvtRec->u.RamRead.GCPhys = GCPhys;
7115 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7116 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7117 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7118}
7119
7120
7121/**
7122 * IOMMMIOWrite notification.
7123 */
7124VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7125{
7126 PVMCPU pVCpu = VMMGetCpu(pVM);
7127 if (!pVCpu)
7128 return;
7129 PIEMCPU pIemCpu = &pVCpu->iem.s;
7130 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7131 if (!pEvtRec)
7132 return;
7133 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7134 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7135 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7136 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7137 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7138 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7139 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7140 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7141 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7142}
7143
7144
7145/**
7146 * IOMIOPortRead notification.
7147 */
7148VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7149{
7150 PVMCPU pVCpu = VMMGetCpu(pVM);
7151 if (!pVCpu)
7152 return;
7153 PIEMCPU pIemCpu = &pVCpu->iem.s;
7154 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7155 if (!pEvtRec)
7156 return;
7157 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7158 pEvtRec->u.IOPortRead.Port = Port;
7159 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7160 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7161 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7162}
7163
7164/**
7165 * IOMIOPortWrite notification.
7166 */
7167VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7168{
7169 PVMCPU pVCpu = VMMGetCpu(pVM);
7170 if (!pVCpu)
7171 return;
7172 PIEMCPU pIemCpu = &pVCpu->iem.s;
7173 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7174 if (!pEvtRec)
7175 return;
7176 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7177 pEvtRec->u.IOPortWrite.Port = Port;
7178 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7179 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7180 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7181 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7182}
7183
7184
7185VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7186{
7187 AssertFailed();
7188}
7189
7190
7191VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7192{
7193 AssertFailed();
7194}
7195
7196
7197/**
7198 * Fakes and records an I/O port read.
7199 *
7200 * @returns VINF_SUCCESS.
7201 * @param pIemCpu The IEM per CPU data.
7202 * @param Port The I/O port.
7203 * @param pu32Value Where to store the fake value.
7204 * @param cbValue The size of the access.
7205 */
7206static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7207{
7208 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7209 if (pEvtRec)
7210 {
7211 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7212 pEvtRec->u.IOPortRead.Port = Port;
7213 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7214 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7215 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7216 }
7217 pIemCpu->cIOReads++;
7218 *pu32Value = 0xcccccccc;
7219 return VINF_SUCCESS;
7220}
7221
7222
7223/**
7224 * Fakes and records an I/O port write.
7225 *
7226 * @returns VINF_SUCCESS.
7227 * @param pIemCpu The IEM per CPU data.
7228 * @param Port The I/O port.
7229 * @param u32Value The value being written.
7230 * @param cbValue The size of the access.
7231 */
7232static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7233{
7234 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7235 if (pEvtRec)
7236 {
7237 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7238 pEvtRec->u.IOPortWrite.Port = Port;
7239 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7240 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7241 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7242 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7243 }
7244 pIemCpu->cIOWrites++;
7245 return VINF_SUCCESS;
7246}
7247
7248
7249/**
7250 * Used to add extra details about a stub case.
7251 * @param pIemCpu The IEM per CPU state.
7252 */
7253static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7254{
7255 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7256 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7257 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7258 char szRegs[4096];
7259 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7260 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7261 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7262 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7263 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7264 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7265 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7266 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7267 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7268 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7269 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7270 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7271 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7272 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7273 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7274 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7275 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7276 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7277 " efer=%016VR{efer}\n"
7278 " pat=%016VR{pat}\n"
7279 " sf_mask=%016VR{sf_mask}\n"
7280 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7281 " lstar=%016VR{lstar}\n"
7282 " star=%016VR{star} cstar=%016VR{cstar}\n"
7283 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7284 );
7285
7286 char szInstr1[256];
7287 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
7288 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7289 szInstr1, sizeof(szInstr1), NULL);
7290 char szInstr2[256];
7291 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
7292 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7293 szInstr2, sizeof(szInstr2), NULL);
7294
7295 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7296}
7297
7298
7299/**
7300 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7301 * dump to the assertion info.
7302 *
7303 * @param pEvtRec The record to dump.
7304 */
7305static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7306{
7307 switch (pEvtRec->enmEvent)
7308 {
7309 case IEMVERIFYEVENT_IOPORT_READ:
7310 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7311 pEvtRec->u.IOPortRead.Port,
7312 pEvtRec->u.IOPortRead.cbValue);
7313 break;
7314 case IEMVERIFYEVENT_IOPORT_WRITE:
7315 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7316 pEvtRec->u.IOPortWrite.Port,
7317 pEvtRec->u.IOPortWrite.cbValue,
7318 pEvtRec->u.IOPortWrite.u32Value);
7319 break;
7320 case IEMVERIFYEVENT_RAM_READ:
7321 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7322 pEvtRec->u.RamRead.GCPhys,
7323 pEvtRec->u.RamRead.cb);
7324 break;
7325 case IEMVERIFYEVENT_RAM_WRITE:
7326 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7327 pEvtRec->u.RamWrite.GCPhys,
7328 pEvtRec->u.RamWrite.cb,
7329 (int)pEvtRec->u.RamWrite.cb,
7330 pEvtRec->u.RamWrite.ab);
7331 break;
7332 default:
7333 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7334 break;
7335 }
7336}
7337
7338
7339/**
7340 * Raises an assertion on the specified records, showing the given message with
7341 * a record dump attached.
7342 *
7343 * @param pIemCpu The IEM per CPU data.
7344 * @param pEvtRec1 The first record.
7345 * @param pEvtRec2 The second record.
7346 * @param pszMsg The message explaining why we're asserting.
7347 */
7348static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7349{
7350 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7351 iemVerifyAssertAddRecordDump(pEvtRec1);
7352 iemVerifyAssertAddRecordDump(pEvtRec2);
7353 iemVerifyAssertMsg2(pIemCpu);
7354 RTAssertPanic();
7355}
7356
7357
7358/**
7359 * Raises an assertion on the specified record, showing the given message with
7360 * a record dump attached.
7361 *
7362 * @param pIemCpu The IEM per CPU data.
7363 * @param pEvtRec The record to dump.
7364 * @param pszMsg The message explaining why we're asserting.
7365 */
7366static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7367{
7368 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7369 iemVerifyAssertAddRecordDump(pEvtRec);
7370 iemVerifyAssertMsg2(pIemCpu);
7371 RTAssertPanic();
7372}
7373
7374
7375/**
7376 * Verifies a write record.
7377 *
7378 * @param pIemCpu The IEM per CPU data.
7379 * @param pEvtRec The write record.
7380 */
7381static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7382{
7383 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7384 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7385 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7386 if ( RT_FAILURE(rc)
7387 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7388 {
7389 /* fend off ins */
7390 if ( !pIemCpu->cIOReads
7391 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7392 || ( pEvtRec->u.RamWrite.cb != 1
7393 && pEvtRec->u.RamWrite.cb != 2
7394 && pEvtRec->u.RamWrite.cb != 4) )
7395 {
7396 /* fend off ROMs */
7397 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7398 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7399 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7400 {
7401 /* fend off fxsave */
7402 if (pEvtRec->u.RamWrite.cb != 512)
7403 {
7404 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7405 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
7406 RTAssertMsg2Add("REM: %.*Rhxs\n"
7407 "IEM: %.*Rhxs\n",
7408 pEvtRec->u.RamWrite.cb, abBuf,
7409 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7410 iemVerifyAssertAddRecordDump(pEvtRec);
7411 iemVerifyAssertMsg2(pIemCpu);
7412 RTAssertPanic();
7413 }
7414 }
7415 }
7416 }
7417
7418}
7419
7420/**
7421 * Performs the post-execution verification checks.
7422 */
7423static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7424{
7425 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7426 return;
7427
7428 /*
7429 * Switch back the state.
7430 */
7431 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7432 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7433 Assert(pOrgCtx != pDebugCtx);
7434 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7435
7436 /*
7437 * Execute the instruction in REM.
7438 */
7439 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7440 EMRemLock(pVM);
7441 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7442 AssertRC(rc);
7443 EMRemUnlock(pVM);
7444
7445 /*
7446 * Compare the register states.
7447 */
7448 unsigned cDiffs = 0;
7449 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7450 {
7451 Log(("REM and IEM ends up with different registers!\n"));
7452
7453# define CHECK_FIELD(a_Field) \
7454 do \
7455 { \
7456 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7457 { \
7458 switch (sizeof(pOrgCtx->a_Field)) \
7459 { \
7460 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7461 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7462 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7463 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7464 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
7465 } \
7466 cDiffs++; \
7467 } \
7468 } while (0)
7469
7470# define CHECK_BIT_FIELD(a_Field) \
7471 do \
7472 { \
7473 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7474 { \
7475 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
7476 cDiffs++; \
7477 } \
7478 } while (0)
7479
7480# define CHECK_SEL(a_Sel) \
7481 do \
7482 { \
7483 CHECK_FIELD(a_Sel); \
7484 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
7485 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
7486 { \
7487 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
7488 cDiffs++; \
7489 } \
7490 CHECK_FIELD(a_Sel##Hid.u64Base); \
7491 CHECK_FIELD(a_Sel##Hid.u32Limit); \
7492 } while (0)
7493
7494#if 1 /* The recompiler doesn't update these the intel way. */
7495 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
7496 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
7497 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
7498 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
7499 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
7500 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
7501 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
7502 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
7503 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
7504 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
7505#endif
7506 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
7507 {
7508 RTAssertMsg2Weak(" the FPU state differs\n");
7509 cDiffs++;
7510 CHECK_FIELD(fpu.FCW);
7511 CHECK_FIELD(fpu.FSW);
7512 CHECK_FIELD(fpu.FTW);
7513 CHECK_FIELD(fpu.FOP);
7514 CHECK_FIELD(fpu.FPUIP);
7515 CHECK_FIELD(fpu.CS);
7516 CHECK_FIELD(fpu.Rsrvd1);
7517 CHECK_FIELD(fpu.FPUDP);
7518 CHECK_FIELD(fpu.DS);
7519 CHECK_FIELD(fpu.Rsrvd2);
7520 CHECK_FIELD(fpu.MXCSR);
7521 CHECK_FIELD(fpu.MXCSR_MASK);
7522 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
7523 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
7524 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
7525 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
7526 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
7527 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
7528 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
7529 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
7530 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
7531 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
7532 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
7533 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
7534 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
7535 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
7536 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
7537 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
7538 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
7539 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
7540 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
7541 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
7542 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
7543 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
7544 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
7545 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
7546 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
7547 CHECK_FIELD(fpu.au32RsrvdRest[i]);
7548 }
7549 CHECK_FIELD(rip);
7550 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
7551 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
7552 {
7553 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
7554 CHECK_BIT_FIELD(rflags.Bits.u1CF);
7555 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
7556 CHECK_BIT_FIELD(rflags.Bits.u1PF);
7557 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
7558 CHECK_BIT_FIELD(rflags.Bits.u1AF);
7559 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
7560 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
7561 CHECK_BIT_FIELD(rflags.Bits.u1SF);
7562 CHECK_BIT_FIELD(rflags.Bits.u1TF);
7563 CHECK_BIT_FIELD(rflags.Bits.u1IF);
7564 CHECK_BIT_FIELD(rflags.Bits.u1DF);
7565 CHECK_BIT_FIELD(rflags.Bits.u1OF);
7566 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
7567 CHECK_BIT_FIELD(rflags.Bits.u1NT);
7568 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
7569 CHECK_BIT_FIELD(rflags.Bits.u1RF);
7570 CHECK_BIT_FIELD(rflags.Bits.u1VM);
7571 CHECK_BIT_FIELD(rflags.Bits.u1AC);
7572 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
7573 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
7574 CHECK_BIT_FIELD(rflags.Bits.u1ID);
7575 }
7576
7577 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
7578 CHECK_FIELD(rax);
7579 CHECK_FIELD(rcx);
7580 if (!pIemCpu->fIgnoreRaxRdx)
7581 CHECK_FIELD(rdx);
7582 CHECK_FIELD(rbx);
7583 CHECK_FIELD(rsp);
7584 CHECK_FIELD(rbp);
7585 CHECK_FIELD(rsi);
7586 CHECK_FIELD(rdi);
7587 CHECK_FIELD(r8);
7588 CHECK_FIELD(r9);
7589 CHECK_FIELD(r10);
7590 CHECK_FIELD(r11);
7591 CHECK_FIELD(r12);
7592 CHECK_FIELD(r13);
7593 CHECK_SEL(cs);
7594 CHECK_SEL(ss);
7595 CHECK_SEL(ds);
7596 CHECK_SEL(es);
7597 CHECK_SEL(fs);
7598 CHECK_SEL(gs);
7599 CHECK_FIELD(cr0);
7600 CHECK_FIELD(cr2);
7601 CHECK_FIELD(cr3);
7602 CHECK_FIELD(cr4);
7603 CHECK_FIELD(dr[0]);
7604 CHECK_FIELD(dr[1]);
7605 CHECK_FIELD(dr[2]);
7606 CHECK_FIELD(dr[3]);
7607 CHECK_FIELD(dr[6]);
7608 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
7609 CHECK_FIELD(dr[7]);
7610 CHECK_FIELD(gdtr.cbGdt);
7611 CHECK_FIELD(gdtr.pGdt);
7612 CHECK_FIELD(idtr.cbIdt);
7613 CHECK_FIELD(idtr.pIdt);
7614 CHECK_FIELD(ldtr);
7615 CHECK_FIELD(ldtrHid.u64Base);
7616 CHECK_FIELD(ldtrHid.u32Limit);
7617 CHECK_FIELD(ldtrHid.Attr.u);
7618 CHECK_FIELD(tr);
7619 CHECK_FIELD(trHid.u64Base);
7620 CHECK_FIELD(trHid.u32Limit);
7621 CHECK_FIELD(trHid.Attr.u);
7622 CHECK_FIELD(SysEnter.cs);
7623 CHECK_FIELD(SysEnter.eip);
7624 CHECK_FIELD(SysEnter.esp);
7625 CHECK_FIELD(msrEFER);
7626 CHECK_FIELD(msrSTAR);
7627 CHECK_FIELD(msrPAT);
7628 CHECK_FIELD(msrLSTAR);
7629 CHECK_FIELD(msrCSTAR);
7630 CHECK_FIELD(msrSFMASK);
7631 CHECK_FIELD(msrKERNELGSBASE);
7632
7633 if (cDiffs != 0)
7634 {
7635 if (LogIs3Enabled())
7636 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
7637 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
7638 iemVerifyAssertMsg2(pIemCpu);
7639 RTAssertPanic();
7640 }
7641# undef CHECK_FIELD
7642# undef CHECK_BIT_FIELD
7643 }
7644
7645 /*
7646 * If the register state compared fine, check the verification event
7647 * records.
7648 */
7649 if (cDiffs == 0)
7650 {
7651 /*
7652 * Compare verification event records.
7653 * - I/O port accesses should be a 1:1 match.
7654 */
7655 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
7656 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
7657 while (pIemRec && pOtherRec)
7658 {
7659 /* Since we might miss RAM writes and reads, ignore reads and check
7660 that any extra IEM write records match what is actually in memory. */
7661 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
7662 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
7663 && pIemRec->pNext)
7664 {
7665 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
7666 iemVerifyWriteRecord(pIemCpu, pIemRec);
7667 pIemRec = pIemRec->pNext;
7668 }
7669
7670 /* Do the compare. */
7671 if (pIemRec->enmEvent != pOtherRec->enmEvent)
7672 {
7673 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
7674 break;
7675 }
7676 bool fEquals;
7677 switch (pIemRec->enmEvent)
7678 {
7679 case IEMVERIFYEVENT_IOPORT_READ:
7680 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
7681 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
7682 break;
7683 case IEMVERIFYEVENT_IOPORT_WRITE:
7684 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
7685 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
7686 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
7687 break;
7688 case IEMVERIFYEVENT_RAM_READ:
7689 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
7690 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
7691 break;
7692 case IEMVERIFYEVENT_RAM_WRITE:
7693 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
7694 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
7695 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
7696 break;
7697 default:
7698 fEquals = false;
7699 break;
7700 }
7701 if (!fEquals)
7702 {
7703 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
7704 break;
7705 }
7706
7707 /* advance */
7708 pIemRec = pIemRec->pNext;
7709 pOtherRec = pOtherRec->pNext;
7710 }
7711
7712 /* Ignore extra writes and reads. */
7713 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
7714 {
7715 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
7716 iemVerifyWriteRecord(pIemCpu, pIemRec);
7717 pIemRec = pIemRec->pNext;
7718 }
7719 if (pIemRec != NULL)
7720 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
7721 else if (pOtherRec != NULL)
7722 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra Other record!");
7723 }
7724 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7725
7726#if 0
7727 /*
7728 * HACK ALERT! You don't normally want to verify a whole boot sequence.
7729 */
7730 if (pIemCpu->cInstructions == 1)
7731 RTLogFlags(NULL, "disabled");
7732#endif
7733}
7734
7735#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
7736
7737/* stubs */
7738static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7739{
7740 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
7741 return VERR_INTERNAL_ERROR;
7742}
7743
7744static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7745{
7746 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
7747 return VERR_INTERNAL_ERROR;
7748}
7749
7750#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
7751
7752
7753/**
7754 * Updates the real CPU context structure with the context core (from the trap
7755 * stack frame) before interpreting any instructions.
7756 *
7757 * @param pCtx The real CPU context.
7758 * @param pCtxCore The trap stack CPU core context.
7759 */
7760DECLINLINE(void) iemCtxCoreToCtx(PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore)
7761{
7762 PCPUMCTXCORE pDst = CPUMCTX2CORE(pCtx);
7763 if (pDst != pCtxCore)
7764 *pDst = *pCtxCore;
7765}
7766
7767
7768/**
7769 * Updates the context core (from the trap stack frame) with the updated values
7770 * from the real CPU context structure after instruction emulation.
7771 *
7772 * @param pCtx The real CPU context.
7773 * @param pCtxCore The trap stack CPU core context.
7774 */
7775DECLINLINE(void) iemCtxToCtxCore(PCPUMCTXCORE pCtxCore, PCCPUMCTX pCtx)
7776{
7777 PCCPUMCTXCORE pSrc = CPUMCTX2CORE(pCtx);
7778 if (pSrc != pCtxCore)
7779 *pCtxCore = *pSrc;
7780}
7781
7782
7783/**
7784 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
7785 * IEMExecOneWithPrefetchedByPC.
7786 *
7787 * @return Strict VBox status code.
7788 * @param pVCpu The current virtual CPU.
7789 * @param pIemCpu The IEM per CPU data.
7790 */
7791DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu)
7792{
7793 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7794 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
7795 if (rcStrict == VINF_SUCCESS)
7796 pIemCpu->cInstructions++;
7797//#ifdef DEBUG
7798// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
7799//#endif
7800
7801 /* Execute the next instruction as well if a cli, pop ss or
7802 mov ss, Gr has just completed successfully. */
7803 if ( rcStrict == VINF_SUCCESS
7804 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
7805 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
7806 {
7807 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7808 if (rcStrict == VINF_SUCCESS)
7809 {
7810 IEM_OPCODE_GET_NEXT_U8(&b);
7811 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
7812 if (rcStrict == VINF_SUCCESS)
7813 pIemCpu->cInstructions++;
7814 }
7815 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
7816 }
7817
7818 return rcStrict;
7819}
7820
7821
7822/**
7823 * Execute one instruction.
7824 *
7825 * @return Strict VBox status code.
7826 * @param pVCpu The current virtual CPU.
7827 */
7828VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
7829{
7830 PIEMCPU pIemCpu = &pVCpu->iem.s;
7831
7832#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7833 iemExecVerificationModeSetup(pIemCpu);
7834#endif
7835#ifdef LOG_ENABLED
7836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7837 if (LogIs2Enabled())
7838 {
7839 char szInstr[256];
7840 uint32_t cbInstr = 0;
7841 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
7842 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7843 szInstr, sizeof(szInstr), &cbInstr);
7844
7845 Log3(("**** "
7846 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
7847 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
7848 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
7849 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
7850 " %s\n"
7851 ,
7852 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
7853 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
7854 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
7855 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
7856 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
7857 szInstr));
7858
7859 if (LogIs3Enabled())
7860 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
7861 }
7862 else
7863 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
7864 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u));
7865#endif
7866
7867 /*
7868 * Do the decoding and emulation.
7869 */
7870 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7871 if (rcStrict == VINF_SUCCESS)
7872 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
7873
7874#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7875 /*
7876 * Assert some sanity.
7877 */
7878 iemExecVerificationModeCheck(pIemCpu);
7879#endif
7880 if (rcStrict != VINF_SUCCESS)
7881 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
7882 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
7883 return rcStrict;
7884}
7885
7886
7887VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
7888{
7889 PIEMCPU pIemCpu = &pVCpu->iem.s;
7890 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7891
7892 iemCtxCoreToCtx(pCtx, pCtxCore);
7893 iemInitDecoder(pIemCpu);
7894 uint32_t const cbOldWritten = pIemCpu->cbWritten;
7895
7896 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7897 if (rcStrict == VINF_SUCCESS)
7898 {
7899 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
7900 if (rcStrict == VINF_SUCCESS)
7901 iemCtxToCtxCore(pCtxCore, pCtx);
7902 if (pcbWritten)
7903 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
7904 }
7905 return rcStrict;
7906}
7907
7908
7909VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
7910 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
7911{
7912 PIEMCPU pIemCpu = &pVCpu->iem.s;
7913 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7914
7915 iemCtxCoreToCtx(pCtx, pCtxCore);
7916
7917 VBOXSTRICTRC rcStrict;
7918 if (cbOpcodeBytes)
7919 {
7920 iemInitDecoder(pIemCpu);
7921 pIemCpu->cbOpcode = RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
7922 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
7923 rcStrict = VINF_SUCCESS;
7924 }
7925 else
7926 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7927 if (rcStrict == VINF_SUCCESS)
7928 {
7929 rcStrict = iemExecOneInner(pVCpu, pIemCpu);
7930 if (rcStrict == VINF_SUCCESS)
7931 iemCtxToCtxCore(pCtxCore, pCtx);
7932 }
7933 return rcStrict;
7934}
7935
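/* Purely illustrative sketch: a caller that already has the opcode bytes (for
 * instance from an instruction buffer supplied by the VM exit) can skip the
 * guest-memory prefetch; the single CLI byte below is just a stand-in. */
#if 0
    uint8_t const abOpcodes[] = { 0xfa }; /* cli */
    VBOXSTRICTRC rcStrict2 = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                                          abOpcodes, sizeof(abOpcodes));
#endif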
7936
7937VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
7938{
7939 return IEMExecOne(pVCpu);
7940}
7941
7942
7943
7944/**
7945 * Injects a trap, fault, abort, software interrupt or external interrupt.
7946 *
7947 * The parameter list matches TRPMQueryTrapAll pretty closely.
7948 *
7949 * @returns Strict VBox status code.
7950 * @param pVCpu The current virtual CPU.
7951 * @param u8TrapNo The trap number.
7952 * @param enmType What type is it (trap/fault/abort), software
7953 * interrupt or hardware interrupt.
7954 * @param uErrCode The error code if applicable.
7955 * @param uCr2 The CR2 value if applicable.
7956 */
7957VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
7958{
7959 iemInitDecoder(&pVCpu->iem.s);
7960
7961 uint32_t fFlags;
7962 switch (enmType)
7963 {
7964 case TRPM_HARDWARE_INT:
7965 LogFlow(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
7966 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
7967 uErrCode = uCr2 = 0;
7968 break;
7969
7970 case TRPM_SOFTWARE_INT:
7971 LogFlow(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
7972 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
7973 uErrCode = uCr2 = 0;
7974 break;
7975
7976 case TRPM_TRAP:
7977 LogFlow(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
7978 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
7979 if (u8TrapNo == X86_XCPT_PF)
7980 fFlags |= IEM_XCPT_FLAGS_CR2;
7981 switch (u8TrapNo)
7982 {
7983 case X86_XCPT_DF:
7984 case X86_XCPT_TS:
7985 case X86_XCPT_NP:
7986 case X86_XCPT_SS:
7987 case X86_XCPT_PF:
7988 case X86_XCPT_AC:
7989 fFlags |= IEM_XCPT_FLAGS_ERR;
7990 break;
7991 }
7992 break;
7993
7994 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7995 }
7996
7997 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
7998}
7999
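/* Purely illustrative sketch: the usual pairing with TRPM mirrors the code in
 * iemExecVerificationModeSetup above, i.e. query the pending event and forward
 * it to IEMInjectTrap. */
#if 0
    uint8_t u8TrapNo; TRPMEVENT enmType; RTGCUINT uErrCode; RTGCPTR uCr2;
    if (RT_SUCCESS(TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2)))
        IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
#endif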
8000
8001VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
8002{
8003 return VERR_NOT_IMPLEMENTED;
8004}
8005
8006
8007VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8008{
8009 return VERR_NOT_IMPLEMENTED;
8010}
8011
8012
8013#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8014/**
8015 * Executes a IRET instruction with default operand size.
8016 *
8017 * This is for PATM.
8018 *
8019 * @returns VBox status code.
8020 * @param pVCpu The current virtual CPU.
8021 * @param pCtxCore The register frame.
8022 */
8023VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8024{
8025 PIEMCPU pIemCpu = &pVCpu->iem.s;
8026 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8027
8028 iemCtxCoreToCtx(pCtx, pCtxCore);
8029 iemInitDecoder(pIemCpu);
8030 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8031 if (rcStrict == VINF_SUCCESS)
8032 iemCtxToCtxCore(pCtxCore, pCtx);
8033 else
8034 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8035 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8036 return rcStrict;
8037}
8038#endif
8039