VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@36860

Last change on this file since 36860 was 36860, checked in by vboxsync, 14 years ago

IEM: rdtsc, mov DRx, ltr, lldt. cmovnle fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 187.3 KB
1/* $Id: IEMAll.cpp 36860 2011-04-27 17:31:21Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) executes short guest code
22 * sequences that cause too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_EM /** @todo add log group */
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/tm.h>
53#include <VBox/vmm/dbgf.h>
54#ifdef IEM_VERIFICATION_MODE
55# include <VBox/vmm/rem.h>
56# include <VBox/vmm/mm.h>
57#endif
58#include "IEMInternal.h"
59#include <VBox/vmm/vm.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/x86.h>
64#include <iprt/assert.h>
65#include <iprt/string.h>
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/** @typedef PFNIEMOP
72 * Pointer to an opcode decoder function.
73 */
74
75/** @def FNIEMOP_DEF
76 * Define an opcode decoder function.
77 *
78 * We're using macros for this so that adding and removing parameters as well as
79 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
80 *
81 * @param a_Name The function name.
82 */
83
84
85#if defined(__GNUC__) && defined(RT_ARCH_X86)
86typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
87# define FNIEMOP_DEF(a_Name) \
88 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
89# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
90 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
91# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
92 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
93
94#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
95typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
96# define FNIEMOP_DEF(a_Name) \
97 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
98# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
99 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
100# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
101 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
102
103#else
104typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
105# define FNIEMOP_DEF(a_Name) \
106 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
107# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
108 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
109# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
110 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
111
112#endif
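
/*
 * Illustrative sketch (not taken from the opcode maps): how a decoder
 * function is defined and dispatched with the macros above.  The name
 * iemOp_ExampleNop is hypothetical; real decoders are registered in
 * g_apfnOneByteMap and iemRegUpdateRip is defined further down this file.
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleNop)
{
    /* A NOP only needs to advance RIP past the bytes consumed. */
    iemRegUpdateRip(pIemCpu);
    return VINF_SUCCESS;
}
/* Dispatch site: rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]); */
#endif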
113
114
115/**
116 * Function table for a binary operator providing implementation based on
117 * operand size.
118 */
119typedef struct IEMOPBINSIZES
120{
121 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
122 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
123 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
124 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
125} IEMOPBINSIZES;
126/** Pointer to a binary operator function table. */
127typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
128
129
130/**
131 * Function table for a unary operator providing implementation based on
132 * operand size.
133 */
134typedef struct IEMOPUNARYSIZES
135{
136 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
137 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
138 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
139 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
140} IEMOPUNARYSIZES;
141/** Pointer to a unary operator function table. */
142typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
143
144
145/**
146 * Function table for a shift operator providing implementation based on
147 * operand size.
148 */
149typedef struct IEMOPSHIFTSIZES
150{
151 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
152 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
153 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
154 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
155} IEMOPSHIFTSIZES;
156/** Pointer to a shift operator function table. */
157typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
158
159
160/**
161 * Function table for a multiplication or division operation.
162 */
163typedef struct IEMOPMULDIVSIZES
164{
165 PFNIEMAIMPLMULDIVU8 pfnU8;
166 PFNIEMAIMPLMULDIVU16 pfnU16;
167 PFNIEMAIMPLMULDIVU32 pfnU32;
168 PFNIEMAIMPLMULDIVU64 pfnU64;
169} IEMOPMULDIVSIZES;
170/** Pointer to a multiplication or division operation function table. */
171typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
172
173
174/**
175 * Function table for a double precision shift operator providing implementation
176 * based on operand size.
177 */
178typedef struct IEMOPSHIFTDBLSIZES
179{
180 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
181 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
182 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
183} IEMOPSHIFTDBLSIZES;
184/** Pointer to a double precision shift function table. */
185typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
186
187
188/**
189 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
190 */
191typedef union IEMSELDESC
192{
193 /** The legacy view. */
194 X86DESC Legacy;
195 /** The long mode view. */
196 X86DESC64 Long;
197} IEMSELDESC;
198/** Pointer to a selector descriptor table entry. */
199typedef IEMSELDESC *PIEMSELDESC;
200
201
202/*******************************************************************************
203* Defined Constants And Macros *
204*******************************************************************************/
205/** Temporary hack to disable the double execution. Will be removed in favor
206 * of a dedicated execution mode in EM. */
207//#define IEM_VERIFICATION_MODE_NO_REM
208
209/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
210 * due to GCC lacking knowledge about the value range of a switch. */
211#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_INTERNAL_ERROR_4)
212
213/**
214 * Call an opcode decoder function.
215 *
216 * We're using macros for this so that adding and removing parameters can be
217 * done as we please. See FNIEMOP_DEF.
218 */
219#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
220
221/**
222 * Call a common opcode decoder function taking one extra argument.
223 *
224 * We're using macros for this so that adding and removing parameters can be
225 * done as we please. See FNIEMOP_DEF_1.
226 */
227#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
228
229/**
230 * Call a common opcode decoder function taking two extra arguments.
231 *
232 * We're using macros for this so that adding and removing parameters can be
233 * done as we please. See FNIEMOP_DEF_2.
234 */
235#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
236
237/**
238 * Check if we're currently executing in real or virtual 8086 mode.
239 *
240 * @returns @c true if it is, @c false if not.
241 * @param a_pIemCpu The IEM state of the current CPU.
242 */
243#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
244
245/**
246 * Check if we're currently executing in long mode.
247 *
248 * @returns @c true if it is, @c false if not.
249 * @param a_pIemCpu The IEM state of the current CPU.
250 */
251#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
252
253/**
254 * Check if we're currently executing in real mode.
255 *
256 * @returns @c true if it is, @c false if not.
257 * @param a_pIemCpu The IEM state of the current CPU.
258 */
259#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
260
261/**
262 * Tests if an AMD CPUID feature (extended) is marked present in ECX.
263 */
264#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
265
266/**
267 * Checks if an Intel CPUID feature is present.
268 */
269#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
270 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
271 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
272
273/**
274 * Check if the address is canonical.
275 */
276#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
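
/* Worked example for the canonical check: on 48-bit implementations the
   canonical ranges are [0, 0x00007FFFFFFFFFFF] and [0xFFFF800000000000,
   0xFFFFFFFFFFFFFFFF].  Adding 0x800000000000 (with unsigned wrap-around)
   maps both into the single contiguous range [0, 2^48):
       0x00007FFFFFFFFFFF + 0x800000000000 = 0x0000FFFFFFFFFFFF  -> canonical
       0xFFFF800000000000 + 0x800000000000 = 0x0000000000000000  -> canonical
       0x0000800000000000 + 0x800000000000 = 0x0001000000000000  -> not canonical */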
277
278
279/*******************************************************************************
280* Global Variables *
281*******************************************************************************/
282extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
283
284
285/** Function table for the ADD instruction. */
286static const IEMOPBINSIZES g_iemAImpl_add =
287{
288 iemAImpl_add_u8, iemAImpl_add_u8_locked,
289 iemAImpl_add_u16, iemAImpl_add_u16_locked,
290 iemAImpl_add_u32, iemAImpl_add_u32_locked,
291 iemAImpl_add_u64, iemAImpl_add_u64_locked
292};
293
294/** Function table for the ADC instruction. */
295static const IEMOPBINSIZES g_iemAImpl_adc =
296{
297 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
298 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
299 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
300 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
301};
302
303/** Function table for the SUB instruction. */
304static const IEMOPBINSIZES g_iemAImpl_sub =
305{
306 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
307 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
308 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
309 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
310};
311
312/** Function table for the SBB instruction. */
313static const IEMOPBINSIZES g_iemAImpl_sbb =
314{
315 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
316 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
317 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
318 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
319};
320
321/** Function table for the OR instruction. */
322static const IEMOPBINSIZES g_iemAImpl_or =
323{
324 iemAImpl_or_u8, iemAImpl_or_u8_locked,
325 iemAImpl_or_u16, iemAImpl_or_u16_locked,
326 iemAImpl_or_u32, iemAImpl_or_u32_locked,
327 iemAImpl_or_u64, iemAImpl_or_u64_locked
328};
329
330/** Function table for the XOR instruction. */
331static const IEMOPBINSIZES g_iemAImpl_xor =
332{
333 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
334 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
335 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
336 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
337};
338
339/** Function table for the AND instruction. */
340static const IEMOPBINSIZES g_iemAImpl_and =
341{
342 iemAImpl_and_u8, iemAImpl_and_u8_locked,
343 iemAImpl_and_u16, iemAImpl_and_u16_locked,
344 iemAImpl_and_u32, iemAImpl_and_u32_locked,
345 iemAImpl_and_u64, iemAImpl_and_u64_locked
346};
347
348/** Function table for the CMP instruction.
349 * @remarks Making operand order ASSUMPTIONS.
350 */
351static const IEMOPBINSIZES g_iemAImpl_cmp =
352{
353 iemAImpl_cmp_u8, NULL,
354 iemAImpl_cmp_u16, NULL,
355 iemAImpl_cmp_u32, NULL,
356 iemAImpl_cmp_u64, NULL
357};
358
359/** Function table for the TEST instruction.
360 * @remarks Making operand order ASSUMPTIONS.
361 */
362static const IEMOPBINSIZES g_iemAImpl_test =
363{
364 iemAImpl_test_u8, NULL,
365 iemAImpl_test_u16, NULL,
366 iemAImpl_test_u32, NULL,
367 iemAImpl_test_u64, NULL
368};
369
370/** Function table for the BT instruction. */
371static const IEMOPBINSIZES g_iemAImpl_bt =
372{
373 NULL, NULL,
374 iemAImpl_bt_u16, NULL,
375 iemAImpl_bt_u32, NULL,
376 iemAImpl_bt_u64, NULL
377};
378
379/** Function table for the BTC instruction. */
380static const IEMOPBINSIZES g_iemAImpl_btc =
381{
382 NULL, NULL,
383 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
384 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
385 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
386};
387
388/** Function table for the BTR instruction. */
389static const IEMOPBINSIZES g_iemAImpl_btr =
390{
391 NULL, NULL,
392 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
393 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
394 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
395};
396
397/** Function table for the BTS instruction. */
398static const IEMOPBINSIZES g_iemAImpl_bts =
399{
400 NULL, NULL,
401 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
402 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
403 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
404};
405
406/** Function table for the BSF instruction. */
407static const IEMOPBINSIZES g_iemAImpl_bsf =
408{
409 NULL, NULL,
410 iemAImpl_bsf_u16, NULL,
411 iemAImpl_bsf_u32, NULL,
412 iemAImpl_bsf_u64, NULL
413};
414
415/** Function table for the BSR instruction. */
416static const IEMOPBINSIZES g_iemAImpl_bsr =
417{
418 NULL, NULL,
419 iemAImpl_bsr_u16, NULL,
420 iemAImpl_bsr_u32, NULL,
421 iemAImpl_bsr_u64, NULL
422};
423
424/** Function table for the IMUL instruction. */
425static const IEMOPBINSIZES g_iemAImpl_imul_two =
426{
427 NULL, NULL,
428 iemAImpl_imul_two_u16, NULL,
429 iemAImpl_imul_two_u32, NULL,
430 iemAImpl_imul_two_u64, NULL
431};
432
433/** Group 1 /r lookup table. */
434static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
435{
436 &g_iemAImpl_add,
437 &g_iemAImpl_or,
438 &g_iemAImpl_adc,
439 &g_iemAImpl_sbb,
440 &g_iemAImpl_and,
441 &g_iemAImpl_sub,
442 &g_iemAImpl_xor,
443 &g_iemAImpl_cmp
444};
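
/* Illustrative dispatch (hand-written here, not lifted from the decoder):
   for the group 1 opcode forms (0x80..0x83) the ModR/M /reg field, bits 5:3,
   selects the entry, so with bRm holding the ModR/M byte:
       PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
   e.g. (bRm >> 3) & 7 == 5 picks &g_iemAImpl_sub. */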
445
446/** Function table for the INC instruction. */
447static const IEMOPUNARYSIZES g_iemAImpl_inc =
448{
449 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
450 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
451 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
452 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
453};
454
455/** Function table for the DEC instruction. */
456static const IEMOPUNARYSIZES g_iemAImpl_dec =
457{
458 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
459 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
460 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
461 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
462};
463
464/** Function table for the NEG instruction. */
465static const IEMOPUNARYSIZES g_iemAImpl_neg =
466{
467 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
468 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
469 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
470 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
471};
472
473/** Function table for the NOT instruction. */
474static const IEMOPUNARYSIZES g_iemAImpl_not =
475{
476 iemAImpl_not_u8, iemAImpl_not_u8_locked,
477 iemAImpl_not_u16, iemAImpl_not_u16_locked,
478 iemAImpl_not_u32, iemAImpl_not_u32_locked,
479 iemAImpl_not_u64, iemAImpl_not_u64_locked
480};
481
482
483/** Function table for the ROL instruction. */
484static const IEMOPSHIFTSIZES g_iemAImpl_rol =
485{
486 iemAImpl_rol_u8,
487 iemAImpl_rol_u16,
488 iemAImpl_rol_u32,
489 iemAImpl_rol_u64
490};
491
492/** Function table for the ROR instruction. */
493static const IEMOPSHIFTSIZES g_iemAImpl_ror =
494{
495 iemAImpl_ror_u8,
496 iemAImpl_ror_u16,
497 iemAImpl_ror_u32,
498 iemAImpl_ror_u64
499};
500
501/** Function table for the RCL instruction. */
502static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
503{
504 iemAImpl_rcl_u8,
505 iemAImpl_rcl_u16,
506 iemAImpl_rcl_u32,
507 iemAImpl_rcl_u64
508};
509
510/** Function table for the RCR instruction. */
511static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
512{
513 iemAImpl_rcr_u8,
514 iemAImpl_rcr_u16,
515 iemAImpl_rcr_u32,
516 iemAImpl_rcr_u64
517};
518
519/** Function table for the SHL instruction. */
520static const IEMOPSHIFTSIZES g_iemAImpl_shl =
521{
522 iemAImpl_shl_u8,
523 iemAImpl_shl_u16,
524 iemAImpl_shl_u32,
525 iemAImpl_shl_u64
526};
527
528/** Function table for the SHR instruction. */
529static const IEMOPSHIFTSIZES g_iemAImpl_shr =
530{
531 iemAImpl_shr_u8,
532 iemAImpl_shr_u16,
533 iemAImpl_shr_u32,
534 iemAImpl_shr_u64
535};
536
537/** Function table for the SAR instruction. */
538static const IEMOPSHIFTSIZES g_iemAImpl_sar =
539{
540 iemAImpl_sar_u8,
541 iemAImpl_sar_u16,
542 iemAImpl_sar_u32,
543 iemAImpl_sar_u64
544};
545
546
547/** Function table for the MUL instruction. */
548static const IEMOPMULDIVSIZES g_iemAImpl_mul =
549{
550 iemAImpl_mul_u8,
551 iemAImpl_mul_u16,
552 iemAImpl_mul_u32,
553 iemAImpl_mul_u64
554};
555
556/** Function table for the IMUL instruction working implicitly on rAX. */
557static const IEMOPMULDIVSIZES g_iemAImpl_imul =
558{
559 iemAImpl_imul_u8,
560 iemAImpl_imul_u16,
561 iemAImpl_imul_u32,
562 iemAImpl_imul_u64
563};
564
565/** Function table for the DIV instruction. */
566static const IEMOPMULDIVSIZES g_iemAImpl_div =
567{
568 iemAImpl_div_u8,
569 iemAImpl_div_u16,
570 iemAImpl_div_u32,
571 iemAImpl_div_u64
572};
573
574/** Function table for the IDIV instruction. */
575static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
576{
577 iemAImpl_idiv_u8,
578 iemAImpl_idiv_u16,
579 iemAImpl_idiv_u32,
580 iemAImpl_idiv_u64
581};
582
583/** Function table for the SHLD instruction. */
584static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
585{
586 iemAImpl_shld_u16,
587 iemAImpl_shld_u32,
588 iemAImpl_shld_u64,
589};
590
591/** Function table for the SHRD instruction. */
592static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
593{
594 iemAImpl_shrd_u16,
595 iemAImpl_shrd_u32,
596 iemAImpl_shrd_u64,
597};
598
599
600/*******************************************************************************
601* Internal Functions *
602*******************************************************************************/
603static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
604static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
605static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
606static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
607static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
608#ifdef IEM_VERIFICATION_MODE
609static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
610#endif
611static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
612static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
613
614
615/**
616 * Initializes the decoder state.
617 *
618 * @param pIemCpu The per CPU IEM state.
619 */
620DECLINLINE(void) iemInitDecode(PIEMCPU pIemCpu)
621{
622 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
623
624 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
625 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
626 ? IEMMODE_64BIT
627 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
628 ? IEMMODE_32BIT
629 : IEMMODE_16BIT;
630 pIemCpu->enmCpuMode = enmMode;
631 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
632 pIemCpu->enmEffAddrMode = enmMode;
633 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
634 pIemCpu->enmEffOpSize = enmMode;
635 pIemCpu->fPrefixes = 0;
636 pIemCpu->uRexReg = 0;
637 pIemCpu->uRexB = 0;
638 pIemCpu->uRexIndex = 0;
639 pIemCpu->iEffSeg = X86_SREG_DS;
640 pIemCpu->offOpcode = 0;
641 pIemCpu->cbOpcode = 0;
642 pIemCpu->cActiveMappings = 0;
643 pIemCpu->iNextMapping = 0;
644}
645
646
647/**
648 * Prefetches the initial opcode bytes when execution starts.
649 *
650 * @returns Strict VBox status code.
651 * @param pIemCpu The IEM state.
652 */
653static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
654{
655#ifdef IEM_VERIFICATION_MODE
656 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
657#endif
658 iemInitDecode(pIemCpu);
659
660 /*
661 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
662 *
663 * First translate CS:rIP to a physical address.
664 */
665 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
666 uint32_t cbToTryRead;
667 RTGCPTR GCPtrPC;
668 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
669 {
670 cbToTryRead = PAGE_SIZE;
671 GCPtrPC = pCtx->rip;
672 if (!IEM_IS_CANONICAL(GCPtrPC))
673 return iemRaiseGeneralProtectionFault0(pIemCpu);
674 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
675 }
676 else
677 {
678 uint32_t GCPtrPC32 = pCtx->eip;
679 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
680 if (GCPtrPC32 > pCtx->csHid.u32Limit)
681 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
682 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
683 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
684 }
685
686 RTGCPHYS GCPhys;
687 uint64_t fFlags;
688 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
689 if (RT_FAILURE(rc))
690 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
691 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
692 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
693 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
694 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
695 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
696 /** @todo Check reserved bits and such stuff. PGM is better at doing
697 * that, so do it when implementing the guest virtual address
698 * TLB... */
699
700#ifdef IEM_VERIFICATION_MODE
701 /*
702 * Optimistic optimization: Use unconsumed opcode bytes from the previous
703 * instruction.
704 */
705 /** @todo optimize this differently by not using PGMPhysRead. */
706 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
707 pIemCpu->GCPhysOpcodes = GCPhys;
708 if ( offPrevOpcodes < cbOldOpcodes
709 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
710 {
711 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
712 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
713 pIemCpu->cbOpcode = cbNew;
714 return VINF_SUCCESS;
715 }
716#endif
717
718 /*
719 * Read the bytes at this address.
720 */
721 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
722 if (cbToTryRead > cbLeftOnPage)
723 cbToTryRead = cbLeftOnPage;
724 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
725 cbToTryRead = sizeof(pIemCpu->abOpcode);
726 /** @todo patch manager */
727 if (!pIemCpu->fByPassHandlers)
728 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
729 else
730 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
731 if (rc != VINF_SUCCESS)
732 return rc;
733 pIemCpu->cbOpcode = cbToTryRead;
734
735 return VINF_SUCCESS;
736}
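
/* Example of the page-boundary clamping above, assuming 4 KiB pages: for a
   GCPtrPC with page offset 0xFF0, PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK)
   = 0x1000 - 0xFF0 = 16, so at most 16 opcode bytes are read before the
   next page (and thus another translation) would be needed. */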
737
738
739/**
740 * Tries to fetch at least @a cbMin bytes more opcodes, raising the
741 * appropriate exception if it fails.
742 *
743 * @returns Strict VBox status code.
744 * @param pIemCpu The IEM state.
745 * @param cbMin The minimum number of additional opcode bytes to fetch.
746 */
747static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
748{
749 /*
750 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
751 *
752 * First translate CS:rIP to a physical address.
753 */
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
756 uint32_t cbToTryRead;
757 RTGCPTR GCPtrNext;
758 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
759 {
760 cbToTryRead = PAGE_SIZE;
761 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
762 if (!IEM_IS_CANONICAL(GCPtrNext))
763 return iemRaiseGeneralProtectionFault0(pIemCpu);
764 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
765 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
766 }
767 else
768 {
769 uint32_t GCPtrNext32 = pCtx->eip;
770 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
771 GCPtrNext32 += pIemCpu->cbOpcode;
772 if (GCPtrNext32 > pCtx->csHid.u32Limit)
773 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
774 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
775 if (cbToTryRead < cbMin - cbLeft)
776 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
777 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
778 }
779
780 RTGCPHYS GCPhys;
781 uint64_t fFlags;
782 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
783 if (RT_FAILURE(rc))
784 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
785 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
786 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
787 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
788 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
789 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
790 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
791 /** @todo Check reserved bits and such stuff. PGM is better at doing
792 * that, so do it when implementing the guest virtual address
793 * TLB... */
794
795 /*
796 * Read the bytes at this address.
797 */
798 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
799 if (cbToTryRead > cbLeftOnPage)
800 cbToTryRead = cbLeftOnPage;
801 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
802 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
803 Assert(cbToTryRead >= cbMin - cbLeft);
804 if (!pIemCpu->fByPassHandlers)
805 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
806 else
807 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
808 if (rc != VINF_SUCCESS)
809 return rc;
810 pIemCpu->cbOpcode += cbToTryRead;
811 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
812
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
819 *
820 * @returns Strict VBox status code.
821 * @param pIemCpu The IEM state.
822 * @param pb Where to return the opcode byte.
823 */
824static VBOXSTRICTRC iemOpcodeGetNextByteSlow(PIEMCPU pIemCpu, uint8_t *pb)
825{
826 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
827 if (rcStrict == VINF_SUCCESS)
828 {
829 uint8_t offOpcode = pIemCpu->offOpcode;
830 *pb = pIemCpu->abOpcode[offOpcode];
831 pIemCpu->offOpcode = offOpcode + 1;
832 }
833 else
834 *pb = 0;
835 return rcStrict;
836}
837
838
839/**
840 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
841 *
842 * @returns Strict VBox status code.
843 * @param pIemCpu The IEM state.
844 * @param pu16 Where to return the sign-extended opcode word.
845 */
846static VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
847{
848 uint8_t u8;
849 VBOXSTRICTRC rcStrict = iemOpcodeGetNextByteSlow(pIemCpu, &u8);
850 if (rcStrict == VINF_SUCCESS)
851 *pu16 = (int8_t)u8;
852 return rcStrict;
853}
854
855
856/**
857 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
858 *
859 * @returns Strict VBox status code.
860 * @param pIemCpu The IEM state.
861 * @param pu16 Where to return the opcode word.
862 */
863static VBOXSTRICTRC iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
864{
865 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
866 if (rcStrict == VINF_SUCCESS)
867 {
868 uint8_t offOpcode = pIemCpu->offOpcode;
869 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
870 pIemCpu->offOpcode = offOpcode + 2;
871 }
872 else
873 *pu16 = 0;
874 return rcStrict;
875}
876
877
878/**
879 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
880 *
881 * @returns Strict VBox status code.
882 * @param pIemCpu The IEM state.
883 * @param pu32 Where to return the opcode dword.
884 */
885static VBOXSTRICTRC iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
886{
887 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
888 if (rcStrict == VINF_SUCCESS)
889 {
890 uint8_t offOpcode = pIemCpu->offOpcode;
891 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
892 pIemCpu->abOpcode[offOpcode + 1],
893 pIemCpu->abOpcode[offOpcode + 2],
894 pIemCpu->abOpcode[offOpcode + 3]);
895 pIemCpu->offOpcode = offOpcode + 4;
896 }
897 else
898 *pu32 = 0;
899 return rcStrict;
900}
901
902
903/**
904 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
905 *
906 * @returns Strict VBox status code.
907 * @param pIemCpu The IEM state.
908 * @param pu64 Where to return the opcode qword.
909 */
910static VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
911{
912 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
913 if (rcStrict == VINF_SUCCESS)
914 {
915 uint8_t offOpcode = pIemCpu->offOpcode;
916 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
917 pIemCpu->abOpcode[offOpcode + 1],
918 pIemCpu->abOpcode[offOpcode + 2],
919 pIemCpu->abOpcode[offOpcode + 3]);
920 pIemCpu->offOpcode = offOpcode + 4;
921 }
922 else
923 *pu64 = 0;
924 return rcStrict;
925}
926
927
928/**
929 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
930 *
931 * @returns Strict VBox status code.
932 * @param pIemCpu The IEM state.
933 * @param pu64 Where to return the opcode qword.
934 */
935static VBOXSTRICTRC iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
936{
937 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
938 if (rcStrict == VINF_SUCCESS)
939 {
940 uint8_t offOpcode = pIemCpu->offOpcode;
941 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
942 pIemCpu->abOpcode[offOpcode + 1],
943 pIemCpu->abOpcode[offOpcode + 2],
944 pIemCpu->abOpcode[offOpcode + 3],
945 pIemCpu->abOpcode[offOpcode + 4],
946 pIemCpu->abOpcode[offOpcode + 5],
947 pIemCpu->abOpcode[offOpcode + 6],
948 pIemCpu->abOpcode[offOpcode + 7]);
949 pIemCpu->offOpcode = offOpcode + 8;
950 }
951 else
952 *pu64 = 0;
953 return rcStrict;
954}
955
956
957/**
958 * Fetches the next opcode byte.
959 *
960 * @returns Strict VBox status code.
961 * @param pIemCpu The IEM state.
962 * @param pu8 Where to return the opcode byte.
963 */
964DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
965{
966 uint8_t const offOpcode = pIemCpu->offOpcode;
967 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
968 return iemOpcodeGetNextByteSlow(pIemCpu, pu8);
969
970 *pu8 = pIemCpu->abOpcode[offOpcode];
971 pIemCpu->offOpcode = offOpcode + 1;
972 return VINF_SUCCESS;
973}
974
975/**
976 * Fetches the next opcode byte, returns automatically on failure.
977 *
978 * @param a_pIemCpu The IEM state.
979 * @param a_pu8 Where to return the opcode byte.
980 */
981#define IEM_OPCODE_GET_NEXT_BYTE(a_pIemCpu, a_pu8) \
982 do \
983 { \
984 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8((a_pIemCpu), (a_pu8)); \
985 if (rcStrict2 != VINF_SUCCESS) \
986 return rcStrict2; \
987 } while (0)
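
/* Usage sketch for the macro above; iemOp_ExampleImm8 is a hypothetical
   decoder name used only for illustration: */
#if 0
FNIEMOP_DEF(iemOp_ExampleImm8)
{
    uint8_t u8Imm;
    IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &u8Imm); /* propagates fetch failures */
    /* ... act on u8Imm, advance RIP, etc. ... */
    return VINF_SUCCESS;
}
#endif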
988
989
990/**
991 * Fetches the next signed byte from the opcode stream.
992 *
993 * @returns Strict VBox status code.
994 * @param pIemCpu The IEM state.
995 * @param pi8 Where to return the signed byte.
996 */
997DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
998{
999 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1000}
1001
1002/**
1003 * Fetches the next signed byte from the opcode stream, returning automatically
1004 * on failure.
1005 *
1006 * @param a_pIemCpu The IEM state.
1007 * @param a_pi8 Where to return the signed byte.
1008 */
1009#define IEM_OPCODE_GET_NEXT_S8(a_pIemCpu, a_pi8) \
1010 do \
1011 { \
1012 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8((a_pIemCpu), (a_pi8)); \
1013 if (rcStrict2 != VINF_SUCCESS) \
1014 return rcStrict2; \
1015 } while (0)
1016
1017
1018/**
1019 * Fetches the next signed byte from the opcode stream, extending it to
1020 * unsigned 16-bit.
1021 *
1022 * @returns Strict VBox status code.
1023 * @param pIemCpu The IEM state.
1024 * @param pu16 Where to return the unsigned word.
1025 */
1026DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1027{
1028 uint8_t const offOpcode = pIemCpu->offOpcode;
1029 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1030 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1031
1032 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1033 pIemCpu->offOpcode = offOpcode + 1;
1034 return VINF_SUCCESS;
1035}
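
/* Example: an opcode byte of 0xFE is (int8_t)-2, so the assignment above
   stores 0xFFFE, i.e. the 8-bit displacement sign-extended to 16 bits. */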
1036
1037
1038/**
1039 * Fetches the next signed byte from the opcode stream, sign-extending it to
1040 * a word and returning automatically on failure.
1041 *
1042 * @param a_pIemCpu The IEM state.
1043 * @param a_pu16 Where to return the word.
1044 */
1045#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pIemCpu, a_pu16) \
1046 do \
1047 { \
1048 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16((a_pIemCpu), (a_pu16)); \
1049 if (rcStrict2 != VINF_SUCCESS) \
1050 return rcStrict2; \
1051 } while (0)
1052
1053
1054/**
1055 * Fetches the next opcode word.
1056 *
1057 * @returns Strict VBox status code.
1058 * @param pIemCpu The IEM state.
1059 * @param pu16 Where to return the opcode word.
1060 */
1061DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1062{
1063 uint8_t const offOpcode = pIemCpu->offOpcode;
1064 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1065 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1066
1067 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1068 pIemCpu->offOpcode = offOpcode + 2;
1069 return VINF_SUCCESS;
1070}
1071
1072/**
1073 * Fetches the next opcode word, returns automatically on failure.
1074 *
1075 * @param a_pIemCpu The IEM state.
1076 * @param a_pu16 Where to return the opcode word.
1077 */
1078#define IEM_OPCODE_GET_NEXT_U16(a_pIemCpu, a_pu16) \
1079 do \
1080 { \
1081 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16((a_pIemCpu), (a_pu16)); \
1082 if (rcStrict2 != VINF_SUCCESS) \
1083 return rcStrict2; \
1084 } while (0)
1085
1086
1087/**
1088 * Fetches the next opcode dword.
1089 *
1090 * @returns Strict VBox status code.
1091 * @param pIemCpu The IEM state.
1092 * @param pu32 Where to return the opcode double word.
1093 */
1094DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1095{
1096 uint8_t const offOpcode = pIemCpu->offOpcode;
1097 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1098 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1099
1100 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1101 pIemCpu->abOpcode[offOpcode + 1],
1102 pIemCpu->abOpcode[offOpcode + 2],
1103 pIemCpu->abOpcode[offOpcode + 3]);
1104 pIemCpu->offOpcode = offOpcode + 4;
1105 return VINF_SUCCESS;
1106}
1107
1108/**
1109 * Fetches the next opcode dword, returns automatically on failure.
1110 *
1111 * @param a_pIemCpu The IEM state.
1112 * @param a_pu32 Where to return the opcode dword.
1113 */
1114#define IEM_OPCODE_GET_NEXT_U32(a_pIemCpu, a_pu32) \
1115 do \
1116 { \
1117 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32((a_pIemCpu), (a_pu32)); \
1118 if (rcStrict2 != VINF_SUCCESS) \
1119 return rcStrict2; \
1120 } while (0)
1121
1122
1123/**
1124 * Fetches the next opcode dword, sign extending it into a quad word.
1125 *
1126 * @returns Strict VBox status code.
1127 * @param pIemCpu The IEM state.
1128 * @param pu64 Where to return the opcode quad word.
1129 */
1130DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1131{
1132 uint8_t const offOpcode = pIemCpu->offOpcode;
1133 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1134 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1135
1136 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1137 pIemCpu->abOpcode[offOpcode + 1],
1138 pIemCpu->abOpcode[offOpcode + 2],
1139 pIemCpu->abOpcode[offOpcode + 3]);
1140 *pu64 = i32;
1141 pIemCpu->offOpcode = offOpcode + 4;
1142 return VINF_SUCCESS;
1143}
1144
1145/**
1146 * Fetches the next opcode double word and sign extends it to a quad word,
1147 * returns automatically on failure.
1148 *
1149 * @param a_pIemCpu The IEM state.
1150 * @param a_pu64 Where to return the opcode quad word.
1151 */
1152#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pIemCpu, a_pu64) \
1153 do \
1154 { \
1155 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64((a_pIemCpu), (a_pu64)); \
1156 if (rcStrict2 != VINF_SUCCESS) \
1157 return rcStrict2; \
1158 } while (0)
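
/* Example: a 32-bit immediate of 0x80000000 is (int32_t)-2147483648 and is
   returned as 0xFFFFFFFF80000000, matching the sign extension the CPU
   applies to 32-bit immediates with a 64-bit operand size. */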
1159
1160
1161/**
1162 * Fetches the next opcode qword.
1163 *
1164 * @returns Strict VBox status code.
1165 * @param pIemCpu The IEM state.
1166 * @param pu64 Where to return the opcode qword.
1167 */
1168DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1169{
1170 uint8_t const offOpcode = pIemCpu->offOpcode;
1171 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1172 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1173
1174 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1175 pIemCpu->abOpcode[offOpcode + 1],
1176 pIemCpu->abOpcode[offOpcode + 2],
1177 pIemCpu->abOpcode[offOpcode + 3],
1178 pIemCpu->abOpcode[offOpcode + 4],
1179 pIemCpu->abOpcode[offOpcode + 5],
1180 pIemCpu->abOpcode[offOpcode + 6],
1181 pIemCpu->abOpcode[offOpcode + 7]);
1182 pIemCpu->offOpcode = offOpcode + 8;
1183 return VINF_SUCCESS;
1184}
1185
1186/**
1187 * Fetches the next opcode qword, returns automatically on failure.
1188 *
1189 * @param a_pIemCpu The IEM state.
1190 * @param a_pu64 Where to return the opcode qword.
1191 */
1192#define IEM_OPCODE_GET_NEXT_U64(a_pIemCpu, a_pu64) \
1193 do \
1194 { \
1195 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64((a_pIemCpu), (a_pu64)); \
1196 if (rcStrict2 != VINF_SUCCESS) \
1197 return rcStrict2; \
1198 } while (0)
1199
1200
1201/** @name Raising Exceptions.
1202 *
1203 * @{
1204 */
1205
1206static VBOXSTRICTRC iemRaiseDivideError(PIEMCPU pIemCpu)
1207{
1208 AssertFailed(/** @todo implement this */);
1209 return VERR_NOT_IMPLEMENTED;
1210}
1211
1212static VBOXSTRICTRC iemRaiseDebugException(PIEMCPU pIemCpu)
1213{
1214 AssertFailed(/** @todo implement this */);
1215 return VERR_NOT_IMPLEMENTED;
1216}
1217
1218static VBOXSTRICTRC iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
1219{
1220 AssertFailed(/** @todo implement this */);
1221 return VERR_NOT_IMPLEMENTED;
1222}
1223
1224
1225static VBOXSTRICTRC iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
1226{
1227 AssertFailed(/** @todo implement this */);
1228 return VERR_NOT_IMPLEMENTED;
1229}
1230
1231
1232static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
1233{
1234 AssertFailed(/** @todo implement this */);
1235 return VERR_NOT_IMPLEMENTED;
1236}
1237
1238
1239static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
1240{
1241 AssertFailed(/** @todo implement this */);
1242 return VERR_NOT_IMPLEMENTED;
1243}
1244
1245
1246static VBOXSTRICTRC iemRaiseNotCanonical(PIEMCPU pIemCpu)
1247{
1248 AssertFailed(/** @todo implement this */);
1249 return VERR_NOT_IMPLEMENTED;
1250}
1251
1252
1253static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1254{
1255 AssertFailed(/** @todo implement this */);
1256 return VERR_NOT_IMPLEMENTED;
1257}
1258
1259
1260static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
1261{
1262 AssertFailed(/** @todo implement this */);
1263 return VERR_NOT_IMPLEMENTED;
1264}
1265
1266
1267static VBOXSTRICTRC iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
1268{
1269 AssertFailed(/** @todo implement this */);
1270 return VERR_NOT_IMPLEMENTED;
1271}
1272
1273
1274static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
1275{
1276 AssertFailed(/** @todo implement this */);
1277 return VERR_NOT_IMPLEMENTED;
1278}
1279
1280
1281static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
1282{
1283 AssertFailed(/** @todo implement this */);
1284 return VERR_NOT_IMPLEMENTED;
1285}
1286
1287
1288static VBOXSTRICTRC iemRaiseMathFault(PIEMCPU pIemCpu)
1289{
1290 AssertFailed(/** @todo implement this */);
1291 return VERR_NOT_IMPLEMENTED;
1292}
1293
1294
1295
1296/**
1297 * Macro for calling iemCImplRaiseInvalidLockPrefix().
1298 *
1299 * This enables us to add/remove arguments and force different levels of
1300 * inlining as we wish.
1301 *
1302 * @return Strict VBox status code.
1303 */
1304#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
1305IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
1306{
1307 AssertFailed();
1308 return VERR_NOT_IMPLEMENTED;
1309}
1310
1311
1312/**
1313 * Macro for calling iemCImplRaiseInvalidOpcode().
1314 *
1315 * This enables us to add/remove arguments and force different levels of
1316 * inlining as we wish.
1317 *
1318 * @return Strict VBox status code.
1319 */
1320#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
1321IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
1322{
1323 AssertFailed();
1324 return VERR_NOT_IMPLEMENTED;
1325}
1326
1327
1328/** @} */
1329
1330
1331/*
1332 *
1333 * Helper routines.
1334 * Helper routines.
1335 * Helper routines.
1336 *
1337 */
1338
1339/**
1340 * Recalculates the effective operand size.
1341 *
1342 * @param pIemCpu The IEM state.
1343 */
1344static void iemRecalEffOpSize(PIEMCPU pIemCpu)
1345{
1346 switch (pIemCpu->enmCpuMode)
1347 {
1348 case IEMMODE_16BIT:
1349 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1350 break;
1351 case IEMMODE_32BIT:
1352 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1353 break;
1354 case IEMMODE_64BIT:
1355 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1356 {
1357 case 0:
1358 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
1359 break;
1360 case IEM_OP_PRF_SIZE_OP:
1361 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1362 break;
1363 case IEM_OP_PRF_SIZE_REX_W:
1364 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1365 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1366 break;
1367 }
1368 break;
1369 default:
1370 AssertFailed();
1371 }
1372}
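
/* Summary of the 64-bit mode cases above (REX.W takes precedence over the
   0x66 operand-size prefix):
       no size prefix          -> enmDefOpSize
       0x66 only               -> 16-bit
       REX.W, with or
       without 0x66            -> 64-bit */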
1373
1374
1375/**
1376 * Sets the default operand size to 64-bit and recalculates the effective
1377 * operand size.
1378 *
1379 * @param pIemCpu The IEM state.
1380 */
1381static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
1382{
1383 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1384 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
1385 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1386 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
1387 else
1388 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
1389}
1390
1391
1392/*
1393 *
1394 * Common opcode decoders.
1395 * Common opcode decoders.
1396 * Common opcode decoders.
1397 *
1398 */
1399#include <iprt/mem.h>
1400
1401/**
1402 * Used to add extra details about a stub case.
1403 * @param pIemCpu The IEM per CPU state.
1404 */
1405static void iemOpStubMsg2(PIEMCPU pIemCpu)
1406{
1407 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1408 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1409 char szRegs[4096];
1410 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1411 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1412 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1413 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1414 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1415 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1416 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1417 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1418 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1419 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1420 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1421 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1422 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1423 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1424 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1425 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1426 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1427 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1428 " efer=%016VR{efer}\n"
1429 " pat=%016VR{pat}\n"
1430 " sf_mask=%016VR{sf_mask}\n"
1431 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1432 " lstar=%016VR{lstar}\n"
1433 " star=%016VR{star} cstar=%016VR{cstar}\n"
1434 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1435 );
1436
1437 char szInstr[256];
1438 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
1439 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1440 szInstr, sizeof(szInstr), NULL);
1441
1442 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
1443}
1444
1445
1446/** Stubs an opcode. */
1447#define FNIEMOP_STUB(a_Name) \
1448 FNIEMOP_DEF(a_Name) \
1449 { \
1450 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
1451 iemOpStubMsg2(pIemCpu); \
1452 RTAssertPanic(); \
1453 return VERR_NOT_IMPLEMENTED; \
1454 } \
1455 typedef int ignore_semicolon
1456
1457/** Stubs an opcode. */
1458#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
1459 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
1460 { \
1461 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
1462 iemOpStubMsg2(pIemCpu); \
1463 RTAssertPanic(); \
1464 return VERR_NOT_IMPLEMENTED; \
1465 } \
1466 typedef int ignore_semicolon
1467
1468
1469
1470/** @name Register Access.
1471 * @{
1472 */
1473
1474/**
1475 * Gets a reference (pointer) to the specified hidden segment register.
1476 *
1477 * @returns Hidden register reference.
1478 * @param pIemCpu The per CPU data.
1479 * @param iSegReg The segment register.
1480 */
1481static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
1482{
1483 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1484 switch (iSegReg)
1485 {
1486 case X86_SREG_ES: return &pCtx->esHid;
1487 case X86_SREG_CS: return &pCtx->csHid;
1488 case X86_SREG_SS: return &pCtx->ssHid;
1489 case X86_SREG_DS: return &pCtx->dsHid;
1490 case X86_SREG_FS: return &pCtx->fsHid;
1491 case X86_SREG_GS: return &pCtx->gsHid;
1492 }
1493 AssertFailedReturn(NULL);
1494}
1495
1496
1497/**
1498 * Gets a reference (pointer) to the specified segment register (the selector
1499 * value).
1500 *
1501 * @returns Pointer to the selector variable.
1502 * @param pIemCpu The per CPU data.
1503 * @param iSegReg The segment register.
1504 */
1505static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
1506{
1507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1508 switch (iSegReg)
1509 {
1510 case X86_SREG_ES: return &pCtx->es;
1511 case X86_SREG_CS: return &pCtx->cs;
1512 case X86_SREG_SS: return &pCtx->ss;
1513 case X86_SREG_DS: return &pCtx->ds;
1514 case X86_SREG_FS: return &pCtx->fs;
1515 case X86_SREG_GS: return &pCtx->gs;
1516 }
1517 AssertFailedReturn(NULL);
1518}
1519
1520
1521/**
1522 * Fetches the selector value of a segment register.
1523 *
1524 * @returns The selector value.
1525 * @param pIemCpu The per CPU data.
1526 * @param iSegReg The segment register.
1527 */
1528static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
1529{
1530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1531 switch (iSegReg)
1532 {
1533 case X86_SREG_ES: return pCtx->es;
1534 case X86_SREG_CS: return pCtx->cs;
1535 case X86_SREG_SS: return pCtx->ss;
1536 case X86_SREG_DS: return pCtx->ds;
1537 case X86_SREG_FS: return pCtx->fs;
1538 case X86_SREG_GS: return pCtx->gs;
1539 }
1540 AssertFailedReturn(0xffff);
1541}
1542
1543
1544/**
1545 * Gets a reference (pointer) to the specified general register.
1546 *
1547 * @returns Register reference.
1548 * @param pIemCpu The per CPU data.
1549 * @param iReg The general register.
1550 */
1551static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
1552{
1553 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1554 switch (iReg)
1555 {
1556 case X86_GREG_xAX: return &pCtx->rax;
1557 case X86_GREG_xCX: return &pCtx->rcx;
1558 case X86_GREG_xDX: return &pCtx->rdx;
1559 case X86_GREG_xBX: return &pCtx->rbx;
1560 case X86_GREG_xSP: return &pCtx->rsp;
1561 case X86_GREG_xBP: return &pCtx->rbp;
1562 case X86_GREG_xSI: return &pCtx->rsi;
1563 case X86_GREG_xDI: return &pCtx->rdi;
1564 case X86_GREG_x8: return &pCtx->r8;
1565 case X86_GREG_x9: return &pCtx->r9;
1566 case X86_GREG_x10: return &pCtx->r10;
1567 case X86_GREG_x11: return &pCtx->r11;
1568 case X86_GREG_x12: return &pCtx->r12;
1569 case X86_GREG_x13: return &pCtx->r13;
1570 case X86_GREG_x14: return &pCtx->r14;
1571 case X86_GREG_x15: return &pCtx->r15;
1572 }
1573 AssertFailedReturn(NULL);
1574}
1575
1576
1577/**
1578 * Gets a reference (pointer) to the specified 8-bit general register.
1579 *
1580 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1581 *
1582 * @returns Register reference.
1583 * @param pIemCpu The per CPU data.
1584 * @param iReg The register.
1585 */
1586static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
1587{
1588 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
1589 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
1590
1591 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
1592 if (iReg >= 4)
1593 pu8Reg++;
1594 return pu8Reg;
1595}
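
/* Example: without a REX prefix, iReg values 4..7 encode AH/CH/DH/BH.  For
   iReg == 5 (CH), iReg & 3 yields 1 (&rcx) and the increment then moves the
   byte pointer to bits 15:8 of RCX -- assuming a little-endian host, which
   is what the surrounding code relies on. */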
1596
1597
1598/**
1599 * Fetches the value of an 8-bit general register.
1600 *
1601 * @returns The register value.
1602 * @param pIemCpu The per CPU data.
1603 * @param iReg The register.
1604 */
1605static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
1606{
1607 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
1608 return *pbSrc;
1609}
1610
1611
1612/**
1613 * Fetches the value of a 16-bit general register.
1614 *
1615 * @returns The register value.
1616 * @param pIemCpu The per CPU data.
1617 * @param iReg The register.
1618 */
1619static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
1620{
1621 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
1622}
1623
1624
1625/**
1626 * Fetches the value of a 32-bit general register.
1627 *
1628 * @returns The register value.
1629 * @param pIemCpu The per CPU data.
1630 * @param iReg The register.
1631 */
1632static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
1633{
1634 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
1635}
1636
1637
1638/**
1639 * Fetches the value of a 64-bit general register.
1640 *
1641 * @returns The register value.
1642 * @param pIemCpu The per CPU data.
1643 * @param iReg The register.
1644 */
1645static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
1646{
1647 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
1648}
1649
1650
1651/**
1652 * Checks whether the FPU state is in FXSAVE format.
1653 *
1654 * @returns true if it is, false if it's in FNSAVE format.
1655 * @param pIemCpu The per CPU data.
1656 */
1657DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
1658{
1659#ifdef RT_ARCH_AMD64
1660 return true;
1661#else
1662/// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
1663 return true;
1664#endif
1665}
1666
1667
1668/**
1669 * Gets the FPU status word.
1670 *
1671 * @returns FPU status word
1672 * @param pIemCpu The per CPU data.
1673 */
1674static uint16_t iemFRegFetchFsw(PIEMCPU pIemCpu)
1675{
1676 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1677 uint16_t u16Fsw;
1678 if (iemFRegIsFxSaveFormat(pIemCpu))
1679 u16Fsw = pCtx->fpu.FSW;
1680 else
1681 {
1682 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
1683 u16Fsw = pFpu->FSW;
1684 }
1685 return u16Fsw;
1686}
1687
1688/**
1689 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
1690 *
1691 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1692 * segment limit.
1693 *
1694 * @param pIemCpu The per CPU data.
1695 * @param offNextInstr The offset of the next instruction.
1696 */
1697static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
1698{
1699 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1700 switch (pIemCpu->enmEffOpSize)
1701 {
1702 case IEMMODE_16BIT:
1703 {
1704 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1705 if ( uNewIp > pCtx->csHid.u32Limit
1706 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1707 return iemRaiseGeneralProtectionFault0(pIemCpu);
1708 pCtx->rip = uNewIp;
1709 break;
1710 }
1711
1712 case IEMMODE_32BIT:
1713 {
1714 Assert(pCtx->rip <= UINT32_MAX);
1715 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1716
1717 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1718 if (uNewEip > pCtx->csHid.u32Limit)
1719 return iemRaiseGeneralProtectionFault0(pIemCpu);
1720 pCtx->rip = uNewEip;
1721 break;
1722 }
1723
1724 case IEMMODE_64BIT:
1725 {
1726 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1727
1728 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1729 if (!IEM_IS_CANONICAL(uNewRip))
1730 return iemRaiseGeneralProtectionFault0(pIemCpu);
1731 pCtx->rip = uNewRip;
1732 break;
1733 }
1734
1735 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1736 }
1737
1738 return VINF_SUCCESS;
1739}
1740
1741
1742/**
1743 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
1744 *
1745 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1746 * segment limit.
1747 *
1748 * @returns Strict VBox status code.
1749 * @param pIemCpu The per CPU data.
1750 * @param offNextInstr The offset of the next instruction.
1751 */
1752static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
1753{
1754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1755 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
1756
1757 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
1758 if ( uNewIp > pCtx->csHid.u32Limit
1759 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1760 return iemRaiseGeneralProtectionFault0(pIemCpu);
1761 /** @todo Test 16-bit jump in 64-bit mode. */
1762 pCtx->rip = uNewIp;
1763
1764 return VINF_SUCCESS;
1765}
1766
1767
1768/**
1769 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
1770 *
1771 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1772 * segment limit.
1773 *
1774 * @returns Strict VBox status code.
1775 * @param pIemCpu The per CPU data.
1776 * @param offNextInstr The offset of the next instruction.
1777 */
1778static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
1779{
1780 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1781 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
1782
1783 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
1784 {
1785 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1786
1787 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
1788 if (uNewEip > pCtx->csHid.u32Limit)
1789 return iemRaiseGeneralProtectionFault0(pIemCpu);
1790 pCtx->rip = uNewEip;
1791 }
1792 else
1793 {
1794 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1795
1796 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
1797 if (!IEM_IS_CANONICAL(uNewRip))
1798 return iemRaiseGeneralProtectionFault0(pIemCpu);
1799 pCtx->rip = uNewRip;
1800 }
1801 return VINF_SUCCESS;
1802}
1803
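/*
 * Illustrative sketch of how a decoder would drive the relative jump
 * helpers above. The helpers add both the signed displacement and the
 * instruction length (pIemCpu->offOpcode) themselves, so the caller only
 * passes the fetched immediate. The IEM_OPCODE_GET_NEXT_S8 fetch macro is
 * assumed here for illustration:
 *
 *      int8_t i8Imm;
 *      IEM_OPCODE_GET_NEXT_S8(&i8Imm);
 *      return iemRegRipRelativeJumpS8(pIemCpu, i8Imm);
 */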
1804
1805/**
1806 * Performs a near jump to the specified address.
1807 *
1808 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1809 * segment limit.
1810 *
1811 * @param pIemCpu The per CPU data.
1812 * @param uNewRip The new RIP value.
1813 */
1814static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
1815{
1816 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1817 switch (pIemCpu->enmEffOpSize)
1818 {
1819 case IEMMODE_16BIT:
1820 {
1821 Assert(uNewRip <= UINT16_MAX);
1822 if ( uNewRip > pCtx->csHid.u32Limit
1823 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
1824 return iemRaiseGeneralProtectionFault0(pIemCpu);
1825 /** @todo Test 16-bit jump in 64-bit mode. */
1826 pCtx->rip = uNewRip;
1827 break;
1828 }
1829
1830 case IEMMODE_32BIT:
1831 {
1832 Assert(uNewRip <= UINT32_MAX);
1833 Assert(pCtx->rip <= UINT32_MAX);
1834 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
1835
1836 if (uNewRip > pCtx->csHid.u32Limit)
1837 return iemRaiseGeneralProtectionFault0(pIemCpu);
1838 pCtx->rip = uNewRip;
1839 break;
1840 }
1841
1842 case IEMMODE_64BIT:
1843 {
1844 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
1845
1846 if (!IEM_IS_CANONICAL(uNewRip))
1847 return iemRaiseGeneralProtectionFault0(pIemCpu);
1848 pCtx->rip = uNewRip;
1849 break;
1850 }
1851
1852 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1853 }
1854
1855 return VINF_SUCCESS;
1856}
1857
1858
1859/**
1860 * Get the address of the top of the stack.
1861 *
1862 * @param pCtx The CPU context whose SP/ESP/RSP should be
1863 * read.
1864 */
1865DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
1866{
1867 if (pCtx->ssHid.Attr.n.u1Long)
1868 return pCtx->rsp;
1869 if (pCtx->ssHid.Attr.n.u1DefBig)
1870 return pCtx->esp;
1871 return pCtx->sp;
1872}
1873
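/*
 * Illustrative sketch (assumed values): the stack width follows the hidden
 * SS attributes, so only the relevant part of RSP is used.
 *
 *      pCtx->rsp = UINT64_C(0x0000123480005678);
 *      u1Long=1:              iemRegGetEffRsp(pCtx) == 0x0000123480005678
 *      u1Long=0, u1DefBig=1:  iemRegGetEffRsp(pCtx) == 0x80005678
 *      u1Long=0, u1DefBig=0:  iemRegGetEffRsp(pCtx) == 0x5678
 */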
1874
1875/**
1876 * Updates the RIP/EIP/IP to point to the next instruction.
1877 *
1878 * @param pIemCpu The per CPU data.
1879 * @param cbInstr The number of bytes to add.
1880 */
1881static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
1882{
1883 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1884 switch (pIemCpu->enmCpuMode)
1885 {
1886 case IEMMODE_16BIT:
1887 Assert(pCtx->rip <= UINT16_MAX);
1888 pCtx->eip += cbInstr;
1889 pCtx->eip &= UINT32_C(0xffff);
1890 break;
1891
1892 case IEMMODE_32BIT:
1893 pCtx->eip += cbInstr;
1894 Assert(pCtx->rip <= UINT32_MAX);
1895 break;
1896
1897 case IEMMODE_64BIT:
1898 pCtx->rip += cbInstr;
1899 break;
1900 default: AssertFailed();
1901 }
1902}
1903
1904
1905/**
1906 * Updates the RIP/EIP/IP to point to the next instruction.
1907 *
1908 * @param pIemCpu The per CPU data.
1909 */
1910static void iemRegUpdateRip(PIEMCPU pIemCpu)
1911{
1912 iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
1913}
1914
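/*
 * Illustrative sketch (assumed values): in 16-bit mode the masking above
 * gives IP its 64K wrap-around semantics.
 *
 *      pCtx->eip = 0xffff;
 *      iemRegAddToRip(pIemCpu, 2);
 *      Assert(pCtx->eip == 0x0001);    // (0xffff + 2) & 0xffff
 */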
1915
1916/**
1917 * Adds to the stack pointer.
1918 *
1919 * @param pCtx The CPU context whose SP/ESP/RSP should be
1920 * updated.
1921 * @param cbToAdd The number of bytes to add.
1922 */
1923DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
1924{
1925 if (pCtx->ssHid.Attr.n.u1Long)
1926 pCtx->rsp += cbToAdd;
1927 else if (pCtx->ssHid.Attr.n.u1DefBig)
1928 pCtx->esp += cbToAdd;
1929 else
1930 pCtx->sp += cbToAdd;
1931}
1932
1933
1934/**
1935 * Subtracts from the stack pointer.
1936 *
1937 * @param pCtx The CPU context whose SP/ESP/RSP should be
1938 * updated.
1939 * @param cbToSub The number of bytes to subtract.
1940 */
1941DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
1942{
1943 if (pCtx->ssHid.Attr.n.u1Long)
1944 pCtx->rsp -= cbToSub;
1945 else if (pCtx->ssHid.Attr.n.u1DefBig)
1946 pCtx->esp -= cbToSub;
1947 else
1948 pCtx->sp -= cbToSub;
1949}
1950
1951
1952/**
1953 * Adds to the temporary stack pointer.
1954 *
1955 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1956 * @param cbToAdd The number of bytes to add.
1957 * @param pCtx Where to get the current stack mode.
1958 */
1959DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
1960{
1961 if (pCtx->ssHid.Attr.n.u1Long)
1962 pTmpRsp->u += cbToAdd;
1963 else if (pCtx->ssHid.Attr.n.u1DefBig)
1964 pTmpRsp->DWords.dw0 += cbToAdd;
1965 else
1966 pTmpRsp->Words.w0 += cbToAdd;
1967}
1968
1969
1970/**
1971 * Subtracts from the temporary stack pointer.
1972 *
1973 * @param pTmpRsp The temporary SP/ESP/RSP to update.
1974 * @param cbToSub The number of bytes to subtract.
1975 * @param pCtx Where to get the current stack mode.
1976 */
1977DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
1978{
1979 if (pCtx->ssHid.Attr.n.u1Long)
1980 pTmpRsp->u -= cbToSub;
1981 else if (pCtx->ssHid.Attr.n.u1DefBig)
1982 pTmpRsp->DWords.dw0 -= cbToSub;
1983 else
1984 pTmpRsp->Words.w0 -= cbToSub;
1985}
1986
1987
1988/**
1989 * Calculates the effective stack address for a push of the specified size as
1990 * well as the new RSP value (upper bits may be masked).
1991 *
1992 * @returns Effective stack address for the push.
1993 * @param pCtx Where to get the current stack mode.
1994 * @param cbItem The size of the stack item to push.
1995 * @param puNewRsp Where to return the new RSP value.
1996 */
1997DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
1998{
1999 RTUINT64U uTmpRsp;
2000 RTGCPTR GCPtrTop;
2001 uTmpRsp.u = pCtx->rsp;
2002
2003 if (pCtx->ssHid.Attr.n.u1Long)
2004 GCPtrTop = uTmpRsp.u -= cbItem;
2005 else if (pCtx->ssHid.Attr.n.u1DefBig)
2006 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2007 else
2008 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2009 *puNewRsp = uTmpRsp.u;
2010 return GCPtrTop;
2011}
2012
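/*
 * Illustrative sketch (assumed values): pushing 4 bytes on a 32-bit stack
 * (u1DefBig=1) only decrements the low dword; the upper half of RSP stays
 * untouched in the returned new value.
 *
 *      pCtx->rsp = UINT64_C(0x0000dead00001000);
 *      uint64_t uNewRsp;
 *      RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
 *      // GCPtrTop == 0xffc, uNewRsp == UINT64_C(0x0000dead00000ffc)
 */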
2013
2014/**
2015 * Gets the current stack pointer and calculates the value after a pop of the
2016 * specified size.
2017 *
2018 * @returns Current stack pointer.
2019 * @param pCtx Where to get the current stack mode.
2020 * @param cbItem The size of the stack item to pop.
2021 * @param puNewRsp Where to return the new RSP value.
2022 */
2023DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
2024{
2025 RTUINT64U uTmpRsp;
2026 RTGCPTR GCPtrTop;
2027 uTmpRsp.u = pCtx->rsp;
2028
2029 if (pCtx->ssHid.Attr.n.u1Long)
2030 {
2031 GCPtrTop = uTmpRsp.u;
2032 uTmpRsp.u += cbItem;
2033 }
2034 else if (pCtx->ssHid.Attr.n.u1DefBig)
2035 {
2036 GCPtrTop = uTmpRsp.DWords.dw0;
2037 uTmpRsp.DWords.dw0 += cbItem;
2038 }
2039 else
2040 {
2041 GCPtrTop = uTmpRsp.Words.w0;
2042 uTmpRsp.Words.w0 += cbItem;
2043 }
2044 *puNewRsp = uTmpRsp.u;
2045 return GCPtrTop;
2046}
2047
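/*
 * Illustrative sketch (assumed values): the pop variant returns the
 * current top of stack and the incremented RSP, which the caller commits
 * only once the read has succeeded.
 *
 *      pCtx->rsp = UINT64_C(0xffc);    // 32-bit stack, u1DefBig=1
 *      uint64_t uNewRsp;
 *      RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
 *      // GCPtrTop == 0xffc, uNewRsp == 0x1000
 */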
2048
2049/**
2050 * Calculates the effective stack address for a push of the specified size as
2051 * well as the new temporary RSP value (upper bits may be masked).
2052 *
2053 * @returns Effective stack address for the push.
2054 * @param pTmpRsp The temporary stack pointer. This is updated.
2055 * @param cbItem The size of the stack item to push.
2056 * @param pCtx Where to get the current stack mode.
2057 */
2058DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
2059{
2060 RTGCPTR GCPtrTop;
2061
2062 if (pCtx->ssHid.Attr.n.u1Long)
2063 GCPtrTop = pTmpRsp->u -= cbItem;
2064 else if (pCtx->ssHid.Attr.n.u1DefBig)
2065 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2066 else
2067 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2068 return GCPtrTop;
2069}
2070
2071
2072/**
2073 * Gets the effective stack address for a pop of the specified size and
2074 * calculates and updates the temporary RSP.
2075 *
2076 * @returns Current stack pointer.
2077 * @param pTmpRsp The temporary stack pointer. This is updated.
2078 * @param cbItem The size of the stack item to pop.
2079 * @param pCtx Where to get the current stack mode.
2080 */
2081DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
2082{
2083 RTGCPTR GCPtrTop;
2084 if (pCtx->ssHid.Attr.n.u1Long)
2085 {
2086 GCPtrTop = pTmpRsp->u;
2087 pTmpRsp->u += cbItem;
2088 }
2089 else if (pCtx->ssHid.Attr.n.u1DefBig)
2090 {
2091 GCPtrTop = pTmpRsp->DWords.dw0;
2092 pTmpRsp->DWords.dw0 += cbItem;
2093 }
2094 else
2095 {
2096 GCPtrTop = pTmpRsp->Words.w0;
2097 pTmpRsp->Words.w0 += cbItem;
2098 }
2099 return GCPtrTop;
2100}
2101
2102
2103/**
2104 * Checks if an Intel CPUID feature bit is set.
2105 *
2106 * @returns true / false.
2107 *
2108 * @param pIemCpu The IEM per CPU data.
2109 * @param fEdx The EDX bit to test, or 0 if only testing ECX.
2110 * @param fEcx The ECX bit to test, or 0 if only testing EDX.
2111 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
2112 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
2113 */
2114static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
2115{
2116 uint32_t uEax, uEbx, uEcx, uEdx;
2117 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
2118 return (fEcx && (uEcx & fEcx))
2119 || (fEdx && (uEdx & fEdx));
2120}
2121
2122
2123/**
2124 * Checks if an AMD CPUID feature bit is set.
2125 *
2126 * @returns true / false.
2127 *
2128 * @param pIemCpu The IEM per CPU data.
2129 * @param fEdx The EDX bit to test, or 0 if only testing ECX.
2130 * @param fEcx The ECX bit to test, or 0 if only testing EDX.
2131 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
2132 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
2133 */
2134static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
2135{
2136 uint32_t uEax, uEbx, uEcx, uEdx;
2137 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
2138 return (fEcx && (uEcx & fEcx))
2139 || (fEdx && (uEdx & fEdx));
2140}
2141
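/*
 * Illustrative sketch: instruction emulations would consult these helpers
 * (typically via the wrapper macros named in the remarks above) before
 * emulating a feature dependent instruction; the surrounding decoder
 * context is assumed:
 *
 *      if (!iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_SSE2, 0))
 *          // ... raise #UD as the instruction requires ...
 */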
2142/** @} */
2143
2144
2145/** @name Memory access.
2146 *
2147 * @{
2148 */
2149
2150
2151/**
2152 * Checks if the given segment can be written to, raising the appropriate
2153 * exception if not.
2154 *
2155 * @returns VBox strict status code.
2156 *
2157 * @param pIemCpu The IEM per CPU data.
2158 * @param pHid Pointer to the hidden register.
2159 * @param iSegReg The register number.
2160 */
2161static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
2162{
2163 if (!pHid->Attr.n.u1Present)
2164 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
2165
2166 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
2167 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2168 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
2169 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
2170
2171 /** @todo DPL/RPL/CPL? */
2172
2173 return VINF_SUCCESS;
2174}
2175
2176
2177/**
2178 * Checks if the given segment can be read from, raising the appropriate
2179 * exception if not.
2180 *
2181 * @returns VBox strict status code.
2182 *
2183 * @param pIemCpu The IEM per CPU data.
2184 * @param pHid Pointer to the hidden register.
2185 * @param iSegReg The register number.
2186 */
2187static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
2188{
2189 if (!pHid->Attr.n.u1Present)
2190 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
2191
2192 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
2193 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
2194 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
2195
2196 /** @todo DPL/RPL/CPL? */
2197
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Applies the segment limit, base and attributes.
2204 *
2205 * This may raise a \#GP or \#SS.
2206 *
2207 * @returns VBox strict status code.
2208 *
2209 * @param pIemCpu The IEM per CPU data.
2210 * @param fAccess The kind of access which is being performed.
2211 * @param iSegReg The index of the segment register to apply.
2212 * This is UINT8_MAX if none (for IDT, GDT, LDT,
2213 * TSS, ++).
2214 * @param cbMem The number of bytes the access covers.
 * @param pGCPtrMem Pointer to the guest memory address to apply
2215 * segmentation to. Input and output parameter.
2216 */
2217static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
2218 size_t cbMem, PRTGCPTR pGCPtrMem)
2219{
2220 if (iSegReg == UINT8_MAX)
2221 return VINF_SUCCESS;
2222
2223 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
2224 switch (pIemCpu->enmCpuMode)
2225 {
2226 case IEMMODE_16BIT:
2227 case IEMMODE_32BIT:
2228 {
2229 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
2230 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
2231
2232 Assert(pSel->Attr.n.u1Present);
2233 Assert(pSel->Attr.n.u1DescType);
2234 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
2235 {
2236 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
2237 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2238 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
2239
2240 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2241 {
2242 /** @todo CPL check. */
2243 }
2244
2245 /*
2246 * There are two kinds of data selectors, normal and expand down.
2247 */
2248 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
2249 {
2250 if ( GCPtrFirst32 > pSel->u32Limit
2251 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
2252 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
2253
2254 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
2255 }
2256 else
2257 {
2258 /** @todo implement expand down segments. */
2259 AssertFailed(/** @todo implement this */);
2260 return VERR_NOT_IMPLEMENTED;
2261 }
2262 }
2263 else
2264 {
2265
2266 /*
2267 * A code selector can usually be used to read through; writing is
2268 * only permitted in real and V8086 mode.
2269 */
2270 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
2271 || ( (fAccess & IEM_ACCESS_TYPE_READ)
2272 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
2273 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
2274 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
2275
2276 if ( GCPtrFirst32 > pSel->u32Limit
2277 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
2278 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
2279
2280 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2281 {
2282 /** @todo CPL check. */
2283 }
2284
2285 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
2286 }
2287 return VINF_SUCCESS;
2288 }
2289
2290 case IEMMODE_64BIT:
2291 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
2292 *pGCPtrMem += pSel->u64Base;
2293 return VINF_SUCCESS;
2294
2295 default:
2296 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
2297 }
2298}
2299
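/*
 * Illustrative sketch (assumed values): a 4 byte data write through a
 * normal (not expand down) segment with base 0x10000 and limit 0xffff.
 * The last byte would land at offset 0x10001, beyond the limit, so the
 * access raises an exception instead of being translated:
 *
 *      GCPtrFirst32 = 0xfffe, cbMem = 4  =>  GCPtrLast32 = 0x10001 > 0xffff
 *
 * Within bounds, *pGCPtrMem would become GCPtrFirst32 + pSel->u64Base.
 */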
2300
2301/**
2302 * Translates a virtual address to a physical address and checks if we
2303 * can access the page as specified.
2304 *
 * @returns Strict VBox status code.
2305 * @param pIemCpu The IEM per CPU data.
2306 * @param GCPtrMem The virtual address.
2307 * @param fAccess The intended access.
2308 * @param pGCPhysMem Where to return the physical address.
2309 */
2310static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
2311 PRTGCPHYS pGCPhysMem)
2312{
2313 /** @todo Need a different PGM interface here. We're currently using
2314 * generic / REM interfaces. This won't cut it for R0 & RC. */
2315 RTGCPHYS GCPhys;
2316 uint64_t fFlags;
2317 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
2318 if (RT_FAILURE(rc))
2319 {
2320 /** @todo Check unassigned memory in unpaged mode. */
2321 *pGCPhysMem = NIL_RTGCPHYS;
2322 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
2323 }
2324
2325 if ( (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)
2326 && ( ( (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read only memory? */
2327 && !(fFlags & X86_PTE_RW)
2328 && ( pIemCpu->uCpl != 0
2329 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )
2330 || ( !(fFlags & X86_PTE_US) /* Kernel memory */
2331 && pIemCpu->uCpl == 3)
2332 || ( (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */
2333 && (fFlags & X86_PTE_PAE_NX)
2334 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
2335 )
2336 )
2337 {
2338 *pGCPhysMem = NIL_RTGCPHYS;
2339 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
2340 }
2341
2342 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
2343 *pGCPhysMem = GCPhys;
2344 return VINF_SUCCESS;
2345}
2346
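/*
 * Illustrative sketch (assumed state): a ring-3 write to a present but
 * read-only user page fails the flag check above and is converted into a
 * page fault rather than a physical address:
 *
 *      fFlags  = X86_PTE_P | X86_PTE_US;               // no X86_PTE_RW
 *      fAccess = IEM_ACCESS_DATA_W;  pIemCpu->uCpl = 3;
 *      => iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED)
 */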
2347
2348
2349/**
2350 * Maps a physical page.
2351 *
2352 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
2353 * @param pIemCpu The IEM per CPU data.
2354 * @param GCPhysMem The physical address.
2355 * @param fAccess The intended access.
2356 * @param ppvMem Where to return the mapping address.
2357 */
2358static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
2359{
2360#ifdef IEM_VERIFICATION_MODE
2361 /* Force the alternative path so we can ignore writes. */
2362 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
2363 return VERR_PGM_PHYS_TLB_CATCH_ALL;
2364#endif
2365
2366 /*
2367 * If we can map the page without trouble, do a block processing
2368 * until the end of the current page.
2369 */
2370 /** @todo need some better API. */
2371 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
2372 GCPhysMem,
2373 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
2374 ppvMem);
2375}
2376
2377
2378/**
2379 * Looks up a memory mapping entry.
2380 *
2381 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
2382 * @param pIemCpu The IEM per CPU data.
2383 * @param pvMem The memory address.
2384 * @param fAccess The access to look up (type and what bits).
2385 */
2386DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2387{
2388 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
2389 if ( pIemCpu->aMemMappings[0].pv == pvMem
2390 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2391 return 0;
2392 if ( pIemCpu->aMemMappings[1].pv == pvMem
2393 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2394 return 1;
2395 if ( pIemCpu->aMemMappings[2].pv == pvMem
2396 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2397 return 2;
2398 return VERR_NOT_FOUND;
2399}
2400
2401
2402/**
2403 * Finds a free memmap entry when using iNextMapping doesn't yield one.
2404 *
2405 * @returns Memory mapping index, 1024 on failure.
2406 * @param pIemCpu The IEM per CPU data.
2407 */
2408static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
2409{
2410 /*
2411 * The easy case.
2412 */
2413 if (pIemCpu->cActiveMappings == 0)
2414 {
2415 pIemCpu->iNextMapping = 1;
2416 return 0;
2417 }
2418
2419 /* There should be enough mappings for all instructions. */
2420 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
2421
2422 AssertFailed(); /** @todo implement me. */
2423 return 1024;
2424
2425}
2426
2427
2428/**
2429 * Commits a bounce buffer that needs writing back and unmaps it.
2430 *
2431 * @returns Strict VBox status code.
2432 * @param pIemCpu The IEM per CPU data.
2433 * @param iMemMap The index of the buffer to commit.
2434 */
2435static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
2436{
2437 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
2438 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
2439
2440 /*
2441 * Do the writing.
2442 */
2443 int rc;
2444 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
2445 && !IEM_VERIFICATION_ENABLED(pIemCpu))
2446 {
2447 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2448 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2449 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2450 if (!pIemCpu->fByPassHandlers)
2451 {
2452 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2453 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2454 pbBuf,
2455 cbFirst);
2456 if (cbSecond && rc == VINF_SUCCESS)
2457 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
2458 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2459 pbBuf + cbFirst,
2460 cbSecond);
2461 }
2462 else
2463 {
2464 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2465 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
2466 pbBuf,
2467 cbFirst);
2468 if (cbSecond && rc == VINF_SUCCESS)
2469 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
2470 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
2471 pbBuf + cbFirst,
2472 cbSecond);
2473 }
2474 }
2475 else
2476 rc = VINF_SUCCESS;
2477
2478#ifdef IEM_VERIFICATION_MODE
2479 /*
2480 * Record the write(s).
2481 */
2482 if (!pIemCpu->fNoRem)
2483 {
2484 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2485 if (pEvtRec)
2486 {
2487 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
2488 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
2489 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
2490 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
2491 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2492 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2493 }
2494 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
2495 {
2496 pEvtRec = iemVerifyAllocRecord(pIemCpu);
2497 if (pEvtRec)
2498 {
2499 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
2500 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
2501 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
2502 memcpy(pEvtRec->u.RamWrite.ab,
2503 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
2504 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
2505 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2506 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2507 }
2508 }
2509 }
2510#endif
2511
2512 /*
2513 * Free the mapping entry.
2514 */
2515 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2516 Assert(pIemCpu->cActiveMappings != 0);
2517 pIemCpu->cActiveMappings--;
2518 return rc;
2519}
2520
2521
2522/**
2523 * iemMemMap worker that deals with a request crossing pages.
2524 */
2525static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
2526 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
2527{
2528 /*
2529 * Do the address translations.
2530 */
2531 RTGCPHYS GCPhysFirst;
2532 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
2533 if (rcStrict != VINF_SUCCESS)
2534 return rcStrict;
2535
2536 RTGCPHYS GCPhysSecond;
2537 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
2538 if (rcStrict != VINF_SUCCESS)
2539 return rcStrict;
2540 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2541
2542 /*
2543 * Read in the current memory content if it's a read or execute access.
2544 */
2545 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2546 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
2547 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
2548
2549 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2550 {
2551 int rc;
2552 if (!pIemCpu->fByPassHandlers)
2553 {
2554 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
2555 if (rc != VINF_SUCCESS)
2556 return rc;
2557 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
2558 if (rc != VINF_SUCCESS)
2559 return rc;
2560 }
2561 else
2562 {
2563 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
2564 if (rc != VINF_SUCCESS)
2565 return rc;
2566 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
2567 if (rc != VINF_SUCCESS)
2568 return rc;
2569 }
2570
2571#ifdef IEM_VERIFICATION_MODE
2572 if (!pIemCpu->fNoRem)
2573 {
2574 /*
2575 * Record the reads.
2576 */
2577 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2578 if (pEvtRec)
2579 {
2580 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2581 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
2582 pEvtRec->u.RamRead.cb = cbFirstPage;
2583 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2584 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2585 }
2586 pEvtRec = iemVerifyAllocRecord(pIemCpu);
2587 if (pEvtRec)
2588 {
2589 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2590 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
2591 pEvtRec->u.RamRead.cb = cbSecondPage;
2592 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2593 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2594 }
2595 }
2596#endif
2597 }
2598#ifdef VBOX_STRICT
2599 else
2600 memset(pbBuf, 0xcc, cbMem);
2601#endif
2602#ifdef VBOX_STRICT
2603 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2604 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2605#endif
2606
2607 /*
2608 * Commit the bounce buffer entry.
2609 */
2610 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2611 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
2612 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
2613 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
2614 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
2615 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2616 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2617 pIemCpu->cActiveMappings++;
2618
2619 *ppvMem = pbBuf;
2620 return VINF_SUCCESS;
2621}
2622
2623
2624/**
2625 * iemMemMap worker that deals with iemMemPageMap failures.
2626 */
2627static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
2628 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
2629{
2630 /*
2631 * Filter out conditions we cannot handle and the ones which shouldn't happen.
2632 */
2633 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
2634 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
2635 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
2636 {
2637 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
2638 return rcMap;
2639 }
2640 pIemCpu->cPotentialExits++;
2641
2642 /*
2643 * Read in the current memory content if it's a read or execute access.
2644 */
2645 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
2646 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
2647 {
2648 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
2649 memset(pbBuf, 0xff, cbMem);
2650 else
2651 {
2652 int rc;
2653 if (!pIemCpu->fByPassHandlers)
2654 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
2655 else
2656 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
2657 if (rc != VINF_SUCCESS)
2658 return rc;
2659 }
2660
2661#ifdef IEM_VERIFICATION_MODE
2662 if (!pIemCpu->fNoRem)
2663 {
2664 /*
2665 * Record the read.
2666 */
2667 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
2668 if (pEvtRec)
2669 {
2670 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
2671 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
2672 pEvtRec->u.RamRead.cb = cbMem;
2673 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
2674 *pIemCpu->ppIemEvtRecNext = pEvtRec;
2675 }
2676 }
2677#endif
2678 }
2679#ifdef VBOX_STRICT
2680 else
2681 memset(pbBuf, 0xcc, cbMem);
2682#endif
2683#ifdef VBOX_STRICT
2684 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
2685 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
2686#endif
2687
2688 /*
2689 * Commit the bounce buffer entry.
2690 */
2691 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2692 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
2693 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
2694 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
2695 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
2696 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
2697 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2698 pIemCpu->cActiveMappings++;
2699
2700 *ppvMem = pbBuf;
2701 return VINF_SUCCESS;
2702}
2703
2704
2705
2706/**
2707 * Maps the specified guest memory for the given kind of access.
2708 *
2709 * This may be using bounce buffering of the memory if it's crossing a page
2710 * boundary or if there is an access handler installed for any of it. Because
2711 * of lock prefix guarantees, we're in for some extra clutter when this
2712 * happens.
2713 *
2714 * This may raise a \#GP, \#SS, \#PF or \#AC.
2715 *
2716 * @returns VBox strict status code.
2717 *
2718 * @param pIemCpu The IEM per CPU data.
2719 * @param ppvMem Where to return the pointer to the mapped
2720 * memory.
2721 * @param cbMem The number of bytes to map. This is usually 1,
2722 * 2, 4, 6, 8, 12, 16 or 32. When used by string
2723 * operations it can be up to a page.
2724 * @param iSegReg The index of the segment register to use for
2725 * this access. The base and limits are checked.
2726 * Use UINT8_MAX to indicate that no segmentation
2727 * is required (for IDT, GDT and LDT accesses).
2728 * @param GCPtrMem The address of the guest memory.
2729 * @param fAccess How the memory is being accessed. The
2730 * IEM_ACCESS_TYPE_XXX bit is used to figure out
2731 * how to map the memory, while the
2732 * IEM_ACCESS_WHAT_XXX bit is used when raising
2733 * exceptions.
2734 */
2735static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
2736{
2737 /*
2738 * Check the input and figure out which mapping entry to use.
2739 */
2740 Assert(cbMem <= 32);
2741 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
2742
2743 unsigned iMemMap = pIemCpu->iNextMapping;
2744 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
2745 {
2746 iMemMap = iemMemMapFindFree(pIemCpu);
2747 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
2748 }
2749
2750 /*
2751 * Map the memory, checking that we can actually access it. If something
2752 * slightly complicated happens, fall back on bounce buffering.
2753 */
2754 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
2755 if (rcStrict != VINF_SUCCESS)
2756 return rcStrict;
2757
2758 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
2759 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
2760
2761 RTGCPHYS GCPhysFirst;
2762 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
2763 if (rcStrict != VINF_SUCCESS)
2764 return rcStrict;
2765
2766 void *pvMem;
2767 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
2768 if (rcStrict != VINF_SUCCESS)
2769 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
2770
2771 /*
2772 * Fill in the mapping table entry.
2773 */
2774 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
2775 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
2776 pIemCpu->iNextMapping = iMemMap + 1;
2777 pIemCpu->cActiveMappings++;
2778
2779 *ppvMem = pvMem;
2780 return VINF_SUCCESS;
2781}
2782
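/*
 * Illustrative sketch: the canonical usage pattern is map, access through
 * the returned pointer (which may point into a bounce buffer), then commit
 * via iemMemCommitAndUnmap below. A read-modify-write access with an
 * IEM_ACCESS_DATA_RW style flag combination is assumed here:
 *
 *      uint32_t *pu32;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, sizeof(*pu32),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32 |= fModifyMask;   // fModifyMask: caller supplied, hypothetical
 *          rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32, IEM_ACCESS_DATA_RW);
 *      }
 */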
2783
2784/**
2785 * Commits the guest memory if bounce buffered and unmaps it.
2786 *
2787 * @returns Strict VBox status code.
2788 * @param pIemCpu The IEM per CPU data.
2789 * @param pvMem The mapping.
2790 * @param fAccess The kind of access.
2791 */
2792static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
2793{
2794 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
2795 AssertReturn(iMemMap >= 0, iMemMap);
2796
2797 /*
2798 * If it's bounce buffered, we need to write back the buffer.
2799 */
2800 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2801 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
2802 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
2803
2804 /* Free the entry. */
2805 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2806 Assert(pIemCpu->cActiveMappings != 0);
2807 pIemCpu->cActiveMappings--;
2808 return VINF_SUCCESS;
2809}
2810
2811
2812/**
2813 * Fetches a data byte.
2814 *
2815 * @returns Strict VBox status code.
2816 * @param pIemCpu The IEM per CPU data.
2817 * @param pu8Dst Where to return the byte.
2818 * @param iSegReg The index of the segment register to use for
2819 * this access. The base and limits are checked.
2820 * @param GCPtrMem The address of the guest memory.
2821 */
2822static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2823{
2824 /* The lazy approach for now... */
2825 uint8_t const *pu8Src;
2826 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2827 if (rc == VINF_SUCCESS)
2828 {
2829 *pu8Dst = *pu8Src;
2830 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2831 }
2832 return rc;
2833}
2834
2835
2836/**
2837 * Fetches a data word.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pIemCpu The IEM per CPU data.
2841 * @param pu16Dst Where to return the word.
2842 * @param iSegReg The index of the segment register to use for
2843 * this access. The base and limits are checked.
2844 * @param GCPtrMem The address of the guest memory.
2845 */
2846static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2847{
2848 /* The lazy approach for now... */
2849 uint16_t const *pu16Src;
2850 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2851 if (rc == VINF_SUCCESS)
2852 {
2853 *pu16Dst = *pu16Src;
2854 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
2855 }
2856 return rc;
2857}
2858
2859
2860/**
2861 * Fetches a data dword.
2862 *
2863 * @returns Strict VBox status code.
2864 * @param pIemCpu The IEM per CPU data.
2865 * @param pu32Dst Where to return the dword.
2866 * @param iSegReg The index of the segment register to use for
2867 * this access. The base and limits are checked.
2868 * @param GCPtrMem The address of the guest memory.
2869 */
2870static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2871{
2872 /* The lazy approach for now... */
2873 uint32_t const *pu32Src;
2874 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2875 if (rc == VINF_SUCCESS)
2876 {
2877 *pu32Dst = *pu32Src;
2878 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
2879 }
2880 return rc;
2881}
2882
2883
2884/**
2885 * Fetches a data dword and sign extends it to a qword.
2886 *
2887 * @returns Strict VBox status code.
2888 * @param pIemCpu The IEM per CPU data.
2889 * @param pu64Dst Where to return the sign extended value.
2890 * @param iSegReg The index of the segment register to use for
2891 * this access. The base and limits are checked.
2892 * @param GCPtrMem The address of the guest memory.
2893 */
2894static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2895{
2896 /* The lazy approach for now... */
2897 int32_t const *pi32Src;
2898 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2899 if (rc == VINF_SUCCESS)
2900 {
2901 *pu64Dst = *pi32Src;
2902 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
2903 }
2904#ifdef __GNUC__ /* warning: GCC may be a royal pain */
2905 else
2906 *pu64Dst = 0;
2907#endif
2908 return rc;
2909}
2910
2911
2912/**
2913 * Fetches a data qword.
2914 *
2915 * @returns Strict VBox status code.
2916 * @param pIemCpu The IEM per CPU data.
2917 * @param pu64Dst Where to return the qword.
2918 * @param iSegReg The index of the segment register to use for
2919 * this access. The base and limits are checked.
2920 * @param GCPtrMem The address of the guest memory.
2921 */
2922static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
2923{
2924 /* The lazy approach for now... */
2925 uint64_t const *pu64Src;
2926 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
2927 if (rc == VINF_SUCCESS)
2928 {
2929 *pu64Dst = *pu64Src;
2930 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
2931 }
2932 return rc;
2933}
2934
2935
2936/**
2937 * Fetches a descriptor register (lgdt, lidt).
2938 *
2939 * @returns Strict VBox status code.
2940 * @param pIemCpu The IEM per CPU data.
2941 * @param pcbLimit Where to return the limit.
2942 * @param pGCPtrBase Where to return the base.
2943 * @param iSegReg The index of the segment register to use for
2944 * this access. The base and limits are checked.
2945 * @param GCPtrMem The address of the guest memory.
2946 * @param enmOpSize The effective operand size.
2947 */
2948static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
2949 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
2950{
2951 uint8_t const *pu8Src;
2952 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
2953 (void **)&pu8Src,
2954 enmOpSize == IEMMODE_64BIT
2955 ? 2 + 8
2956 : enmOpSize == IEMMODE_32BIT
2957 ? 2 + 4
2958 : 2 + 3,
2959 iSegReg,
2960 GCPtrMem,
2961 IEM_ACCESS_DATA_R);
2962 if (rcStrict == VINF_SUCCESS)
2963 {
2964 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
2965 switch (enmOpSize)
2966 {
2967 case IEMMODE_16BIT:
2968 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
2969 break;
2970 case IEMMODE_32BIT:
2971 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
2972 break;
2973 case IEMMODE_64BIT:
2974 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
2975 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
2976 break;
2977
2978 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2979 }
2980 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
2981 }
2982 return rcStrict;
2983}
2984
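/*
 * Illustrative sketch: an lgdt/lidt style emulation is assumed to fetch
 * both fields in one go and only touch the descriptor table register once
 * the fetch has succeeded:
 *
 *      uint16_t cbLimit;
 *      RTGCPTR  GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase,
 *                                                  iSegReg, GCPtrMem, enmOpSize);
 *      if (rcStrict == VINF_SUCCESS)
 *          // ... load cbLimit and GCPtrBase into GDTR or IDTR ...
 */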
2985
2986
2987/**
2988 * Stores a data byte.
2989 *
2990 * @returns Strict VBox status code.
2991 * @param pIemCpu The IEM per CPU data.
2992 * @param iSegReg The index of the segment register to use for
2993 * this access. The base and limits are checked.
2994 * @param GCPtrMem The address of the guest memory.
2995 * @param u8Value The value to store.
2996 */
2997static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
2998{
2999 /* The lazy approach for now... */
3000 uint8_t *pu8Dst;
3001 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3002 if (rc == VINF_SUCCESS)
3003 {
3004 *pu8Dst = u8Value;
3005 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
3006 }
3007 return rc;
3008}
3009
3010
3011/**
3012 * Stores a data word.
3013 *
3014 * @returns Strict VBox status code.
3015 * @param pIemCpu The IEM per CPU data.
3016 * @param iSegReg The index of the segment register to use for
3017 * this access. The base and limits are checked.
3018 * @param GCPtrMem The address of the guest memory.
3019 * @param u16Value The value to store.
3020 */
3021static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
3022{
3023 /* The lazy approach for now... */
3024 uint16_t *pu16Dst;
3025 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3026 if (rc == VINF_SUCCESS)
3027 {
3028 *pu16Dst = u16Value;
3029 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
3030 }
3031 return rc;
3032}
3033
3034
3035/**
3036 * Stores a data dword.
3037 *
3038 * @returns Strict VBox status code.
3039 * @param pIemCpu The IEM per CPU data.
3040 * @param iSegReg The index of the segment register to use for
3041 * this access. The base and limits are checked.
3042 * @param GCPtrMem The address of the guest memory.
3043 * @param u32Value The value to store.
3044 */
3045static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
3046{
3047 /* The lazy approach for now... */
3048 uint32_t *pu32Dst;
3049 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3050 if (rc == VINF_SUCCESS)
3051 {
3052 *pu32Dst = u32Value;
3053 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
3054 }
3055 return rc;
3056}
3057
3058
3059/**
3060 * Stores a data qword.
3061 *
3062 * @returns Strict VBox status code.
3063 * @param pIemCpu The IEM per CPU data.
3064 * @param iSegReg The index of the segment register to use for
3065 * this access. The base and limits are checked.
3066 * @param GCPtrMem The address of the guest memory.
3067 * @param u64Value The value to store.
3068 */
3069static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
3070{
3071 /* The lazy approach for now... */
3072 uint64_t *pu64Dst;
3073 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3074 if (rc == VINF_SUCCESS)
3075 {
3076 *pu64Dst = u64Value;
3077 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
3078 }
3079 return rc;
3080}
3081
3082
3083/**
3084 * Pushes a word onto the stack.
3085 *
3086 * @returns Strict VBox status code.
3087 * @param pIemCpu The IEM per CPU data.
3088 * @param u16Value The value to push.
3089 */
3090static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
3091{
3092 /* Decrement the stack pointer. */
3093 uint64_t uNewRsp;
3094 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3095 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
3096
3097 /* Write the word the lazy way. */
3098 uint16_t *pu16Dst;
3099 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3100 if (rc == VINF_SUCCESS)
3101 {
3102 *pu16Dst = u16Value;
3103 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
3104 }
3105
3106 /* Commit the new RSP value unless an access handler made trouble. */
3107 if (rc == VINF_SUCCESS)
3108 pCtx->rsp = uNewRsp;
3109
3110 return rc;
3111}
3112
3113
3114/**
3115 * Pushes a dword onto the stack.
3116 *
3117 * @returns Strict VBox status code.
3118 * @param pIemCpu The IEM per CPU data.
3119 * @param u32Value The value to push.
3120 */
3121static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
3122{
3123 /* Decrement the stack pointer. */
3124 uint64_t uNewRsp;
3125 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3126 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
3127
3128 /* Write the dword the lazy way. */
3129 uint32_t *pu32Dst;
3130 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3131 if (rc == VINF_SUCCESS)
3132 {
3133 *pu32Dst = u32Value;
3134 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
3135 }
3136
3137 /* Commit the new RSP value unless an access handler made trouble. */
3138 if (rc == VINF_SUCCESS)
3139 pCtx->rsp = uNewRsp;
3140
3141 return rc;
3142}
3143
3144
3145/**
3146 * Pushes a qword onto the stack.
3147 *
3148 * @returns Strict VBox status code.
3149 * @param pIemCpu The IEM per CPU data.
3150 * @param u64Value The value to push.
3151 */
3152static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
3153{
3154 /* Decrement the stack pointer. */
3155 uint64_t uNewRsp;
3156 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3157 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
3158
3159 /* Write the qword the lazy way. */
3160 uint64_t *pu64Dst;
3161 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3162 if (rc == VINF_SUCCESS)
3163 {
3164 *pu64Dst = u64Value;
3165 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
3166 }
3167
3168 /* Commit the new RSP value unless an access handler made trouble. */
3169 if (rc == VINF_SUCCESS)
3170 pCtx->rsp = uNewRsp;
3171
3172 return rc;
3173}
3174
3175
3176/**
3177 * Pops a word from the stack.
3178 *
3179 * @returns Strict VBox status code.
3180 * @param pIemCpu The IEM per CPU data.
3181 * @param pu16Value Where to store the popped value.
3182 */
3183static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
3184{
3185 /* Increment the stack pointer. */
3186 uint64_t uNewRsp;
3187 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3188 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
3189
3190 /* Read the word the lazy way. */
3191 uint16_t const *pu16Src;
3192 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3193 if (rc == VINF_SUCCESS)
3194 {
3195 *pu16Value = *pu16Src;
3196 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3197
3198 /* Commit the new RSP value. */
3199 if (rc == VINF_SUCCESS)
3200 pCtx->rsp = uNewRsp;
3201 }
3202
3203 return rc;
3204}
3205
3206
3207/**
3208 * Pops a dword from the stack.
3209 *
3210 * @returns Strict VBox status code.
3211 * @param pIemCpu The IEM per CPU data.
3212 * @param pu32Value Where to store the popped value.
3213 */
3214static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
3215{
3216 /* Increment the stack pointer. */
3217 uint64_t uNewRsp;
3218 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3219 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
3220
3221 /* Read the dword the lazy way. */
3222 uint32_t const *pu32Src;
3223 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3224 if (rc == VINF_SUCCESS)
3225 {
3226 *pu32Value = *pu32Src;
3227 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
3228
3229 /* Commit the new RSP value. */
3230 if (rc == VINF_SUCCESS)
3231 pCtx->rsp = uNewRsp;
3232 }
3233
3234 return rc;
3235}
3236
3237
3238/**
3239 * Pops a qword from the stack.
3240 *
3241 * @returns Strict VBox status code.
3242 * @param pIemCpu The IEM per CPU data.
3243 * @param pu64Value Where to store the popped value.
3244 */
3245static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
3246{
3247 /* Increment the stack pointer. */
3248 uint64_t uNewRsp;
3249 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3250 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
3251
3252 /* Read the qword the lazy way. */
3253 uint64_t const *pu64Src;
3254 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3255 if (rc == VINF_SUCCESS)
3256 {
3257 *pu64Value = *pu64Src;
3258 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
3259
3260 /* Commit the new RSP value. */
3261 if (rc == VINF_SUCCESS)
3262 pCtx->rsp = uNewRsp;
3263 }
3264
3265 return rc;
3266}
3267
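/*
 * Illustrative sketch: emulating a plain 16-bit "push ax" / "pop ax" pair
 * reduces to the helpers above. RSP is only committed when the memory
 * access succeeded, so a faulting push/pop leaves the guest state intact:
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, pCtx->ax);
 *      ...
 *      uint16_t u16Value;
 *      rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
 */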
3268
3269/**
3270 * Pushes a word onto the stack, using a temporary stack pointer.
3271 *
3272 * @returns Strict VBox status code.
3273 * @param pIemCpu The IEM per CPU data.
3274 * @param u16Value The value to push.
3275 * @param pTmpRsp Pointer to the temporary stack pointer.
3276 */
3277static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
3278{
3279 /* Decrement the stack pointer. */
3280 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3281 RTUINT64U NewRsp = *pTmpRsp;
3282 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
3283
3284 /* Write the word the lazy way. */
3285 uint16_t *pu16Dst;
3286 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3287 if (rc == VINF_SUCCESS)
3288 {
3289 *pu16Dst = u16Value;
3290 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
3291 }
3292
3293 /* Commit the new RSP value unless an access handler made trouble. */
3294 if (rc == VINF_SUCCESS)
3295 *pTmpRsp = NewRsp;
3296
3297 return rc;
3298}
3299
3300
3301/**
3302 * Pushes a dword onto the stack, using a temporary stack pointer.
3303 *
3304 * @returns Strict VBox status code.
3305 * @param pIemCpu The IEM per CPU data.
3306 * @param u32Value The value to push.
3307 * @param pTmpRsp Pointer to the temporary stack pointer.
3308 */
3309static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
3310{
3311 /* Decrement the stack pointer. */
3312 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3313 RTUINT64U NewRsp = *pTmpRsp;
3314 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
3315
3316 /* Write the dword the lazy way. */
3317 uint32_t *pu32Dst;
3318 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3319 if (rc == VINF_SUCCESS)
3320 {
3321 *pu32Dst = u32Value;
3322 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
3323 }
3324
3325 /* Commit the new RSP value unless an access handler made trouble. */
3326 if (rc == VINF_SUCCESS)
3327 *pTmpRsp = NewRsp;
3328
3329 return rc;
3330}
3331
3332
3333/**
3334 * Pushes a qword onto the stack, using a temporary stack pointer.
3335 *
3336 * @returns Strict VBox status code.
3337 * @param pIemCpu The IEM per CPU data.
3338 * @param u64Value The value to push.
3339 * @param pTmpRsp Pointer to the temporary stack pointer.
3340 */
3341static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
3342{
3343 /* Decrement the stack pointer. */
3344 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3345 RTUINT64U NewRsp = *pTmpRsp;
3346 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
3347
3348 /* Write the qword the lazy way. */
3349 uint64_t *pu64Dst;
3350 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3351 if (rc == VINF_SUCCESS)
3352 {
3353 *pu64Dst = u64Value;
3354 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
3355 }
3356
3357 /* Commit the new RSP value unless an access handler made trouble. */
3358 if (rc == VINF_SUCCESS)
3359 *pTmpRsp = NewRsp;
3360
3361 return rc;
3362}
3363
3364
3365/**
3366 * Pops a word from the stack, using a temporary stack pointer.
3367 *
3368 * @returns Strict VBox status code.
3369 * @param pIemCpu The IEM per CPU data.
3370 * @param pu16Value Where to store the popped value.
3371 * @param pTmpRsp Pointer to the temporary stack pointer.
3372 */
3373static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
3374{
3375 /* Increment the stack pointer. */
3376 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3377 RTUINT64U NewRsp = *pTmpRsp;
3378 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
3379
3380 /* Read the word the lazy way. */
3381 uint16_t const *pu16Src;
3382 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3383 if (rc == VINF_SUCCESS)
3384 {
3385 *pu16Value = *pu16Src;
3386 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3387
3388 /* Commit the new RSP value. */
3389 if (rc == VINF_SUCCESS)
3390 *pTmpRsp = NewRsp;
3391 }
3392
3393 return rc;
3394}
3395
3396
3397/**
3398 * Pops a dword from the stack, using a temporary stack pointer.
3399 *
3400 * @returns Strict VBox status code.
3401 * @param pIemCpu The IEM per CPU data.
3402 * @param pu32Value Where to store the popped value.
3403 * @param pTmpRsp Pointer to the temporary stack pointer.
3404 */
3405static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
3406{
3407 /* Increment the stack pointer. */
3408 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3409 RTUINT64U NewRsp = *pTmpRsp;
3410 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
3411
3412 /* Read the dword the lazy way. */
3413 uint32_t const *pu32Src;
3414 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3415 if (rc == VINF_SUCCESS)
3416 {
3417 *pu32Value = *pu32Src;
3418 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
3419
3420 /* Commit the new RSP value. */
3421 if (rc == VINF_SUCCESS)
3422 *pTmpRsp = NewRsp;
3423 }
3424
3425 return rc;
3426}
3427
3428
3429/**
3430 * Pops a qword from the stack, using a temporary stack pointer.
3431 *
3432 * @returns Strict VBox status code.
3433 * @param pIemCpu The IEM per CPU data.
3434 * @param pu64Value Where to store the popped value.
3435 * @param pTmpRsp Pointer to the temporary stack pointer.
3436 */
3437static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
3438{
3439 /* Increment the stack pointer. */
3440 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3441 RTUINT64U NewRsp = *pTmpRsp;
3442 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
3443
3444 /* Read the qword the lazy way. */
3445 uint64_t const *pu64Src;
3446 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3447 if (rcStrict == VINF_SUCCESS)
3448 {
3449 *pu64Value = *pu64Src;
3450 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
3451
3452 /* Commit the new RSP value. */
3453 if (rcStrict == VINF_SUCCESS)
3454 *pTmpRsp = NewRsp;
3455 }
3456
3457 return rcStrict;
3458}
3459
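/*
 * Illustrative sketch: the Ex variants exist so that multi-value sequences
 * (iret, retf and such) can pop through a temporary RSP and commit it once
 * at the very end; a hypothetical two-value 64-bit pop:
 *
 *      RTUINT64U TmpRsp;
 *      TmpRsp.u = pCtx->rsp;
 *      uint64_t uNewRip, uNewCs;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pIemCpu, &uNewRip, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU64Ex(pIemCpu, &uNewCs, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pCtx->rsp = TmpRsp.u;   // commit only when everything succeeded
 */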
3460
3461/**
3462 * Begins a special stack push (used by interrupts, exceptions and such).
3463 *
3464 * This will raise \#SS or \#PF if appropriate.
3465 *
3466 * @returns Strict VBox status code.
3467 * @param pIemCpu The IEM per CPU data.
3468 * @param cbMem The number of bytes to push onto the stack.
3469 * @param ppvMem Where to return the pointer to the stack memory.
3470 * As with the other memory functions this could be
3471 * direct access or bounce buffered access, so
3472 * don't commit the register state until the
3473 * commit call succeeds.
3474 * @param puNewRsp Where to return the new RSP value. This must be
3475 * passed unchanged to
3476 * iemMemStackPushCommitSpecial().
3477 */
3478static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
3479{
3480 Assert(cbMem < UINT8_MAX);
3481 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3482 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
3483 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3484}
3485
3486
3487/**
3488 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
3489 *
3490 * This will update the rSP.
3491 *
3492 * @returns Strict VBox status code.
3493 * @param pIemCpu The IEM per CPU data.
3494 * @param pvMem The pointer returned by
3495 * iemMemStackPushBeginSpecial().
3496 * @param uNewRsp The new RSP value returned by
3497 * iemMemStackPushBeginSpecial().
3498 */
3499static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
3500{
3501 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
3502 if (rcStrict == VINF_SUCCESS)
3503 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3504 return rcStrict;
3505}
3506
3507
3508/**
3509 * Begins a special stack pop (used by iret, retf and such).
3510 *
3511 * This will raise \#SS or \#PF if appropriate.
3512 *
3513 * @returns Strict VBox status code.
3514 * @param pIemCpu The IEM per CPU data.
3515 * @param cbMem The number of bytes to pop off the stack.
3516 * @param ppvMem Where to return the pointer to the stack memory.
3517 * @param puNewRsp Where to return the new RSP value. This must be
3518 * passed unchanged to
3519 * iemMemStackPopCommitSpecial().
3520 */
3521static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
3522{
3523 Assert(cbMem < UINT8_MAX);
3524 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3525 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
3526 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3527}
3528
3529
3530/**
3531 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
3532 *
3533 * This will update the rSP.
3534 *
3535 * @returns Strict VBox status code.
3536 * @param pIemCpu The IEM per CPU data.
3537 * @param pvMem The pointer returned by
3538 * iemMemStackPopBeginSpecial().
3539 * @param uNewRsp The new RSP value returned by
3540 * iemMemStackPopBeginSpecial().
3541 */
3542static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
3543{
3544 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
3545 if (rcStrict == VINF_SUCCESS)
3546 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
3547 return rcStrict;
3548}
3549
3550
3551/**
3552 * Fetches a descriptor table entry.
3553 *
3554 * @returns Strict VBox status code.
3555 * @param pIemCpu The IEM per CPU data.
3556 * @param pDesc Where to return the descriptor table entry.
3557 * @param uSel The selector which table entry to fetch.
3558 */
3559static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
3560{
3561 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3562
3563 /** @todo did the 286 require all 8 bytes to be accessible? */
3564 /*
3565 * Get the selector table base and check bounds.
3566 */
3567 RTGCPTR GCPtrBase;
3568 if (uSel & X86_SEL_LDT)
3569 {
3570 if ( !pCtx->ldtrHid.Attr.n.u1Present
3571 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
3572 {
3573 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
3574 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
3575 /** @todo is this the right exception? */
3576 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3577 }
3578
3579 Assert(pCtx->ldtrHid.Attr.n.u1Present);
3580 GCPtrBase = pCtx->ldtrHid.u64Base;
3581 }
3582 else
3583 {
3584 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
3585 {
3586 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
3587 /** @todo is this the right exception? */
3588 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3589 }
3590 GCPtrBase = pCtx->gdtr.pGdt;
3591 }
3592
3593 /*
3594 * Read the legacy descriptor and maybe the long mode extensions if
3595 * required.
3596 */
3597 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
3598 if (rcStrict == VINF_SUCCESS)
3599 {
3600 if ( !IEM_IS_LONG_MODE(pIemCpu)
3601 || pDesc->Legacy.Gen.u1DescType)
3602 pDesc->Long.au64[1] = 0;
3603 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 <= (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
3604 rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); /* upper half of the 16-byte descriptor */
3605 else
3606 {
3607 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
3608 /** @todo is this the right exception? */
3609 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
3610 }
3611 }
3612 return rcStrict;
3613}
3614
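/**
 * Usage sketch tying the fetch above to the accessed-bit helper below
 * (hedged; uSel and the surrounding checks are illustrative only):
 *
 * @code
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... validate descriptor type, DPL and presence here ...
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *          rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 * @endcode
 */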
3615
3616/**
3617 * Marks the selector descriptor as accessed (only non-system descriptors).
3618 *
3619 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
3620 * will therefore skip the limit checks.
3621 *
3622 * @returns Strict VBox status code.
3623 * @param pIemCpu The IEM per CPU data.
3624 * @param uSel The selector.
3625 */
3626static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
3627{
3628 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3629
3630 /*
3631 * Get the selector table base and calculate the entry address.
3632 */
3633 RTGCPTR GCPtr = uSel & X86_SEL_LDT
3634 ? pCtx->ldtrHid.u64Base
3635 : pCtx->gdtr.pGdt;
3636 GCPtr += uSel & X86_SEL_MASK;
3637
3638 /*
3639 * ASMAtomicBitSet will assert if the address is misaligned, so do some
3640 * ugly stuff to avoid that. This also makes sure the access is atomic and
3641 * more or less removes any question about 8-bit vs 32-bit accesses.
3642 */
3643 VBOXSTRICTRC rcStrict;
3644 uint32_t volatile *pu32;
3645 if ((GCPtr & 3) == 0)
3646 {
3647 /* The normal case, map the 32-bit bits around the accessed bit (40). */
3648 GCPtr += 2 + 2;
3649 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
3650 if (rcStrict != VINF_SUCCESS)
3651 return rcStrict;
3652 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
3653 }
3654 else
3655 {
3656 /* The misaligned GDT/LDT case, map the whole thing. */
3657 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
3658 if (rcStrict != VINF_SUCCESS)
3659 return rcStrict;
3660 switch ((uintptr_t)pu32 & 3)
3661 {
3662 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
3663 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
3664 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
3665 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
3666 }
3667 }
3668
3669 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
3670}
3671
3672/** @} */
3673
3674
3675/*
3676 * Include the C/C++ implementation of the instructions.
3677 */
3678#include "IEMAllCImpl.cpp.h"
3679
3680
3681
3682/** @name "Microcode" macros.
3683 *
3684 * The idea is that we should be able to use the same code both to interpret
3685 * instructions and, eventually, to drive a recompiler. Thus this obfuscation.
3686 *
3687 * @{
3688 */
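/*
 * To illustrate (hedged; decoder shapes vary and iemAImpl_inc_u16 stands in
 * for any two-operand assembly worker), a register-only instruction typically
 * expands to a straight-line block like:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 1);
 *      IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_inc_u16, pu16Dst, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */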
3689#define IEM_MC_BEGIN(cArgs, cLocals) {
3690#define IEM_MC_END() }
3691#define IEM_MC_PAUSE() do {} while (0)
3692#define IEM_MC_CONTINUE() do {} while (0)
3693
3694/** Internal macro. */
3695#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
3696 do \
3697 { \
3698 VBOXSTRICTRC rcStrict2 = a_Expr; \
3699 if (rcStrict2 != VINF_SUCCESS) \
3700 return rcStrict2; \
3701 } while (0)
3702
3703#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
3704#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
3705#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
3706#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
3707#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
3708#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
3709#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
3710
3711#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
3712#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
3713 do { \
3714 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
3715 return iemRaiseDeviceNotAvailable(pIemCpu); \
3716 } while (0)
3717#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
3718 do { \
3719 if (iemFRegFetchFsw(pIemCpu) & X86_FSW_ES) \
3720 return iemRaiseMathFault(pIemCpu); \
3721 } while (0)
3722#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
3723 do { \
3724 if (pIemCpu->uCpl != 0) \
3725 return iemRaiseGeneralProtectionFault0(pIemCpu); \
3726 } while (0)
3727
3728
3729#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
3730#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
3731#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
3732#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
3733#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
3734#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
3735 uint32_t a_Name; \
3736 uint32_t *a_pName = &a_Name
3737#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
3738 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
3739
3740#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
3741
3742#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3743#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3744#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3745#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
3746#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
3747#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
3748#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
3749#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
3750#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
3751#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
3752#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
3753#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
3754#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
3755#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
3756#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
3757#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
3758#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
3759#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
3760#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
3761#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
3762#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
3763#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
3764#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
3765#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = iemFRegFetchFsw(pIemCpu)
3766
3767#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
3768#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
3769#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
3770#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
3771#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
3772
3773#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
3774#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
3775/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
3776 * commit. */
3777#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
3778#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
3779#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
3780
3781#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
3782#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
3783#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
3784 do { \
3785 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
3786 *pu32Reg += (a_u32Value); \
3787 pu32Reg[1] = 0; /* implicitly clear the high half, as a real 32-bit write would. */ \
3788 } while (0)
3789#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
3790
3791#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
3792#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
3793#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
3794 do { \
3795 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
3796 *pu32Reg -= (a_u32Value); \
3797 pu32Reg[1] = 0; /* implicitly clear the high half, as a real 32-bit write would. */ \
3798 } while (0)
3799#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
3800
3801#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg))
3802#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg))
3803#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg))
3804#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg))
3805#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
3806#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
3807#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
3808
3809#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
3810#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
3811#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
3812
3813#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
3814#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
3815#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
3816
3817#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
3818#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
3819#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
3820
3821#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
3822#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
3823#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
3824
3825
3826#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
3827#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
3828
3829
3830
3831#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
3832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
3833#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
3834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
3835#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3836 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
3837#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
3839#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
3841
3842#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
3843 do { \
3844 uint8_t u8Tmp; \
3845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3846 (a_u16Dst) = u8Tmp; \
3847 } while (0)
3848#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3849 do { \
3850 uint8_t u8Tmp; \
3851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3852 (a_u32Dst) = u8Tmp; \
3853 } while (0)
3854#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3855 do { \
3856 uint8_t u8Tmp; \
3857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3858 (a_u64Dst) = u8Tmp; \
3859 } while (0)
3860#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3861 do { \
3862 uint16_t u16Tmp; \
3863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
3864 (a_u32Dst) = u16Tmp; \
3865 } while (0)
3866#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3867 do { \
3868 uint16_t u16Tmp; \
3869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
3870 (a_u64Dst) = u16Tmp; \
3871 } while (0)
3872#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3873 do { \
3874 uint32_t u32Tmp; \
3875 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
3876 (a_u64Dst) = u32Tmp; \
3877 } while (0)
3878
3879#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
3880 do { \
3881 uint8_t u8Tmp; \
3882 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3883 (a_u16Dst) = (int8_t)u8Tmp; \
3884 } while (0)
3885#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3886 do { \
3887 uint8_t u8Tmp; \
3888 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3889 (a_u32Dst) = (int8_t)u8Tmp; \
3890 } while (0)
3891#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3892 do { \
3893 uint8_t u8Tmp; \
3894 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
3895 (a_u64Dst) = (int8_t)u8Tmp; \
3896 } while (0)
3897#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
3898 do { \
3899 uint16_t u16Tmp; \
3900 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
3901 (a_u32Dst) = (int16_t)u16Tmp; \
3902 } while (0)
3903#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3904 do { \
3905 uint16_t u16Tmp; \
3906 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
3907 (a_u64Dst) = (int16_t)u16Tmp; \
3908 } while (0)
3909#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
3910 do { \
3911 uint32_t u32Tmp; \
3912 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
3913 (a_u64Dst) = (int32_t)u32Tmp; \
3914 } while (0)
3915
3916#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
3917 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
3918#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
3919 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
3920#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
3921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
3922#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
3923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
3924
3925#define IEM_MC_PUSH_U16(a_u16Value) \
3926 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
3927#define IEM_MC_PUSH_U32(a_u32Value) \
3928 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
3929#define IEM_MC_PUSH_U64(a_u64Value) \
3930 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
3931
3932#define IEM_MC_POP_U16(a_pu16Value) \
3933 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
3934#define IEM_MC_POP_U32(a_pu32Value) \
3935 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
3936#define IEM_MC_POP_U64(a_pu64Value) \
3937 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
3938
3939/** Maps guest memory for direct or bounce buffered access.
3940 * The purpose is to pass it to an operand implementation, thus the a_iArg.
3941 * @remarks May return.
3942 */
3943#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
3944 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
3945
3946/** Maps guest memory for direct or bounce buffered access.
3947 * The purpose is to pass it to an operand implementation, thus the a_iArg.
3948 * @remarks May return.
3949 */
3950#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
3951 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
3952
3953/** Commits the memory and unmaps the guest memory.
3954 * @remarks May return.
3955 */
3956#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
3957 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
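/* Map/commit sketch for a read-modify-write memory operand (hedged; the
   pu16Dst and GCPtrEffDst names are illustrative):
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
        ... let the operand implementation update *pu16Dst ...
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW); */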
3958
3959/** Calculate effective address from R/M. */
3960#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
3961 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
3962
3963#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
3964#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
3965#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
3966#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
3967
3968/**
3969 * Defers the rest of the instruction emulation to a C implementation routine
3970 * and returns, only taking the standard parameters.
3971 *
3972 * @param a_pfnCImpl The pointer to the C routine.
3973 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
3974 */
3975#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
3976
3977/**
3978 * Defers the rest of instruction emulation to a C implementation routine and
3979 * returns, taking one argument in addition to the standard ones.
3980 *
3981 * @param a_pfnCImpl The pointer to the C routine.
3982 * @param a0 The argument.
3983 */
3984#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
3985
3986/**
3987 * Defers the rest of the instruction emulation to a C implementation routine
3988 * and returns, taking two arguments in addition to the standard ones.
3989 *
3990 * @param a_pfnCImpl The pointer to the C routine.
3991 * @param a0 The first extra argument.
3992 * @param a1 The second extra argument.
3993 */
3994#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
3995
3996/**
3997 * Defers the rest of the instruction emulation to a C implementation routine
3998 * and returns, taking three arguments in addition to the standard ones.
3999 *
4000 * @param a_pfnCImpl The pointer to the C routine.
4001 * @param a0 The first extra argument.
4002 * @param a1 The second extra argument.
4003 * @param a2 The third extra argument.
4004 */
4005#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
4006
4007/**
4008 * Defers the rest of the instruction emulation to a C implementation routine
4009 * and returns, taking five arguments in addition to the standard ones.
4010 *
4011 * @param a_pfnCImpl The pointer to the C routine.
4012 * @param a0 The first extra argument.
4013 * @param a1 The second extra argument.
4014 * @param a2 The third extra argument.
4015 * @param a3 The fourth extra argument.
4016 * @param a4 The fifth extra argument.
4017 */
4018#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
4019
4020/**
4021 * Defers the entire instruction emulation to a C implementation routine and
4022 * returns, only taking the standard parameters.
4023 *
4024 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4025 *
4026 * @param a_pfnCImpl The pointer to the C routine.
4027 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
4028 */
4029#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
4030
4031/**
4032 * Defers the entire instruction emulation to a C implementation routine and
4033 * returns, taking one argument in addition to the standard ones.
4034 *
4035 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4036 *
4037 * @param a_pfnCImpl The pointer to the C routine.
4038 * @param a0 The argument.
4039 */
4040#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
4041
4042/**
4043 * Defers the entire instruction emulation to a C implementation routine and
4044 * returns, taking two arguments in addition to the standard ones.
4045 *
4046 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4047 *
4048 * @param a_pfnCImpl The pointer to the C routine.
4049 * @param a0 The first extra argument.
4050 * @param a1 The second extra argument.
4051 */
4052#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
4053
4054/**
4055 * Defers the entire instruction emulation to a C implementation routine and
4056 * returns, taking three arguments in addition to the standard ones.
4057 *
4058 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4059 *
4060 * @param a_pfnCImpl The pointer to the C routine.
4061 * @param a0 The first extra argument.
4062 * @param a1 The second extra argument.
4063 * @param a2 The third extra argument.
4064 */
4065#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
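/* For instance (hedged; iemCImpl_hlt stands in for any IEM_CIMPL_DEF_0 style
   routine from IEMAllCImpl.cpp.h), an instruction implemented entirely in C
   defers like this:
        FNIEMOP_DEF(iemOp_hlt)
        {
            IEMOP_HLP_NO_LOCK_PREFIX();
            return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
        } */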
4066
4067#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
4068#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
4069#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
4070#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
4071#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
4072 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4073 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4074#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
4075 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4076 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4077#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
4078 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
4079 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4080 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4081#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
4082 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
4083 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4084 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4085#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
4086#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
4087#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
4088#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
4089 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
4090 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4091#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
4092 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
4093 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4094#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
4095 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
4096 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4097#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
4098 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
4099 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4100#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
4101 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
4102 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4103#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
4104 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
4105 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4106#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
4107#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
4108#define IEM_MC_ELSE() } else {
4109#define IEM_MC_ENDIF() } do {} while (0)
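/* The IF/ELSE/ENDIF macros carry their own braces, so conditional microcode
   is written flat; a hedged Jcc-style sketch (i8Imm fetched by the decoder):
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_REL_JMP_S8(i8Imm);
        IEM_MC_ELSE()
            IEM_MC_ADVANCE_RIP();
        IEM_MC_ENDIF();
        IEM_MC_END(); */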
4110
4111/** @} */
4112
4113
4114/** @name Opcode Debug Helpers.
4115 * @{
4116 */
4117#ifdef DEBUG
4118# define IEMOP_MNEMONIC(a_szMnemonic) \
4119 Log2(("decode - %04x:%08RGv %s%s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
4120 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic))
4121# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
4122 Log2(("decode - %04x:%08RGv %s%s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
4123 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps))
4124#else
4125# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
4126# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
4127#endif
4128
4129/** @} */
4130
4131
4132/** @name Opcode Helpers.
4133 * @{
4134 */
4135
4136/** The instruction allows no lock prefixing (in this encoding), throw #UD if
4137 * lock prefixed. */
4138#define IEMOP_HLP_NO_LOCK_PREFIX() \
4139 do \
4140 { \
4141 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
4142 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
4143 } while (0)
4144
4145/** The instruction is not available in 64-bit mode, throw #UD if we're in
4146 * 64-bit mode. */
4147#define IEMOP_HLP_NO_64BIT() \
4148 do \
4149 { \
4150 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
4151 return IEMOP_RAISE_INVALID_OPCODE(); \
4152 } while (0)
4153
4154/** The instruction defaults to 64-bit operand size in 64-bit mode. */
4155#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
4156 do \
4157 { \
4158 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
4159 iemRecalEffOpSize64Default(pIemCpu); \
4160 } while (0)
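/* These helpers normally open a decoder; a hedged sketch (iemOp_example is a
   hypothetical opcode function):
        FNIEMOP_DEF(iemOp_example)
        {
            IEMOP_MNEMONIC("example");
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
            ...
        } */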
4161
4162
4163
4164/**
4165 * Calculates the effective address of a ModR/M memory operand.
4166 *
4167 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
4168 *
4169 * @return Strict VBox status code.
4170 * @param pIemCpu The IEM per CPU data.
4171 * @param bRm The ModRM byte.
4172 * @param pGCPtrEff Where to return the effective address.
4173 */
4174static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
4175{
4176 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
4177 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4178#define SET_SS_DEF() \
4179 do \
4180 { \
4181 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
4182 pIemCpu->iEffSeg = X86_SREG_SS; \
4183 } while (0)
4184
4185/** @todo Check the effective address size crap! */
4186 switch (pIemCpu->enmEffAddrMode)
4187 {
4188 case IEMMODE_16BIT:
4189 {
4190 uint16_t u16EffAddr;
4191
4192 /* Handle the disp16 form with no registers first. */
4193 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
4194 IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr);
4195 else
4196 {
4197 /* Get the displacement. */
4198 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
4199 {
4200 case 0: u16EffAddr = 0; break;
4201 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(pIemCpu, &u16EffAddr); break;
4202 case 2: IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr); break;
4203 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
4204 }
4205
4206 /* Add the base and index registers to the disp. */
4207 switch (bRm & X86_MODRM_RM_MASK)
4208 {
4209 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
4210 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
4211 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
4212 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
4213 case 4: u16EffAddr += pCtx->si; break;
4214 case 5: u16EffAddr += pCtx->di; break;
4215 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
4216 case 7: u16EffAddr += pCtx->bx; break;
4217 }
4218 }
4219
4220 *pGCPtrEff = u16EffAddr;
4221 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
4222 return VINF_SUCCESS;
4223 }
4224
4225 case IEMMODE_32BIT:
4226 {
4227 uint32_t u32EffAddr;
4228
4229 /* Handle the disp32 form with no registers first. */
4230 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
4231 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32EffAddr);
4232 else
4233 {
4234 /* Get the register (or SIB) value. */
4235 switch ((bRm & X86_MODRM_RM_MASK))
4236 {
4237 case 0: u32EffAddr = pCtx->eax; break;
4238 case 1: u32EffAddr = pCtx->ecx; break;
4239 case 2: u32EffAddr = pCtx->edx; break;
4240 case 3: u32EffAddr = pCtx->ebx; break;
4241 case 4: /* SIB */
4242 {
4243 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
4244
4245 /* Get the index and scale it. */
4246 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
4247 {
4248 case 0: u32EffAddr = pCtx->eax; break;
4249 case 1: u32EffAddr = pCtx->ecx; break;
4250 case 2: u32EffAddr = pCtx->edx; break;
4251 case 3: u32EffAddr = pCtx->ebx; break;
4252 case 4: u32EffAddr = 0; /*none */ break;
4253 case 5: u32EffAddr = pCtx->ebp; break;
4254 case 6: u32EffAddr = pCtx->esi; break;
4255 case 7: u32EffAddr = pCtx->edi; break;
4256 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4257 }
4258 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
4259
4260 /* add base */
4261 switch (bSib & X86_SIB_BASE_MASK)
4262 {
4263 case 0: u32EffAddr += pCtx->eax; break;
4264 case 1: u32EffAddr += pCtx->ecx; break;
4265 case 2: u32EffAddr += pCtx->edx; break;
4266 case 3: u32EffAddr += pCtx->ebx; break;
4267 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
4268 case 5:
4269 if ((bRm & X86_MODRM_MOD_MASK) != 0)
4270 {
4271 u32EffAddr += pCtx->ebp;
4272 SET_SS_DEF();
4273 }
4274 else
4275 {
4276 uint32_t u32Disp;
4277 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4278 u32EffAddr += u32Disp;
4279 }
4280 break;
4281 case 6: u32EffAddr += pCtx->esi; break;
4282 case 7: u32EffAddr += pCtx->edi; break;
4283 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4284 }
4285 break;
4286 }
4287 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
4288 case 6: u32EffAddr = pCtx->esi; break;
4289 case 7: u32EffAddr = pCtx->edi; break;
4290 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4291 }
4292
4293 /* Get and add the displacement. */
4294 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
4295 {
4296 case 0:
4297 break;
4298 case 1:
4299 {
4300 int8_t i8Disp;
4301 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
4302 u32EffAddr += i8Disp;
4303 break;
4304 }
4305 case 2:
4306 {
4307 uint32_t u32Disp;
4308 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4309 u32EffAddr += u32Disp;
4310 break;
4311 }
4312 default:
4313 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
4314 }
4315
4316 }
4317 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
4318 *pGCPtrEff = u32EffAddr;
4319 else
4320 {
4321 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
4322 *pGCPtrEff = u32EffAddr & UINT16_MAX;
4323 }
4324 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
4325 return VINF_SUCCESS;
4326 }
4327
4328 case IEMMODE_64BIT:
4329 {
4330 uint64_t u64EffAddr;
4331
4332 /* Handle the rip+disp32 form with no registers first. */
4333 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
4334 {
4335 IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64EffAddr);
4336 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
4337 }
4338 else
4339 {
4340 /* Get the register (or SIB) value. */
4341 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
4342 {
4343 case 0: u64EffAddr = pCtx->rax; break;
4344 case 1: u64EffAddr = pCtx->rcx; break;
4345 case 2: u64EffAddr = pCtx->rdx; break;
4346 case 3: u64EffAddr = pCtx->rbx; break;
4347 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
4348 case 6: u64EffAddr = pCtx->rsi; break;
4349 case 7: u64EffAddr = pCtx->rdi; break;
4350 case 8: u64EffAddr = pCtx->r8; break;
4351 case 9: u64EffAddr = pCtx->r9; break;
4352 case 10: u64EffAddr = pCtx->r10; break;
4353 case 11: u64EffAddr = pCtx->r11; break;
4354 case 13: u64EffAddr = pCtx->r13; break;
4355 case 14: u64EffAddr = pCtx->r14; break;
4356 case 15: u64EffAddr = pCtx->r15; break;
4357 /* SIB */
4358 case 4:
4359 case 12:
4360 {
4361 uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);
4362
4363 /* Get the index and scale it. */
4364 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
4365 {
4366 case 0: u64EffAddr = pCtx->rax; break;
4367 case 1: u64EffAddr = pCtx->rcx; break;
4368 case 2: u64EffAddr = pCtx->rdx; break;
4369 case 3: u64EffAddr = pCtx->rbx; break;
4370 case 4: u64EffAddr = 0; /*none */ break;
4371 case 5: u64EffAddr = pCtx->rbp; break;
4372 case 6: u64EffAddr = pCtx->rsi; break;
4373 case 7: u64EffAddr = pCtx->rdi; break;
4374 case 8: u64EffAddr = pCtx->r8; break;
4375 case 9: u64EffAddr = pCtx->r9; break;
4376 case 10: u64EffAddr = pCtx->r10; break;
4377 case 11: u64EffAddr = pCtx->r11; break;
4378 case 12: u64EffAddr = pCtx->r12; break;
4379 case 13: u64EffAddr = pCtx->r13; break;
4380 case 14: u64EffAddr = pCtx->r14; break;
4381 case 15: u64EffAddr = pCtx->r15; break;
4382 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4383 }
4384 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
4385
4386 /* add base */
4387 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
4388 {
4389 case 0: u64EffAddr += pCtx->rax; break;
4390 case 1: u64EffAddr += pCtx->rcx; break;
4391 case 2: u64EffAddr += pCtx->rdx; break;
4392 case 3: u64EffAddr += pCtx->rbx; break;
4393 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
4394 case 6: u64EffAddr += pCtx->rsi; break;
4395 case 7: u64EffAddr += pCtx->rdi; break;
4396 case 8: u64EffAddr += pCtx->r8; break;
4397 case 9: u64EffAddr += pCtx->r9; break;
4398 case 10: u64EffAddr += pCtx->r10; break;
4399 case 11: u64EffAddr += pCtx->r11; break;
4400 case 14: u64EffAddr += pCtx->r14; break;
4401 case 15: u64EffAddr += pCtx->r15; break;
4402 /* complicated encodings */
4403 case 5:
4404 case 13:
4405 if ((bRm & X86_MODRM_MOD_MASK) != 0)
4406 {
4407 if (!pIemCpu->uRexB)
4408 {
4409 u64EffAddr += pCtx->rbp;
4410 SET_SS_DEF();
4411 }
4412 else
4413 u64EffAddr += pCtx->r13;
4414 }
4415 else
4416 {
4417 uint32_t u32Disp;
4418 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4419 u64EffAddr += (int32_t)u32Disp;
4420 }
4421 break;
4422 }
4423 break;
4424 }
4425 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4426 }
4427
4428 /* Get and add the displacement. */
4429 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
4430 {
4431 case 0:
4432 break;
4433 case 1:
4434 {
4435 int8_t i8Disp;
4436 IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
4437 u64EffAddr += i8Disp;
4438 break;
4439 }
4440 case 2:
4441 {
4442 uint32_t u32Disp;
4443 IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
4444 u64EffAddr += (int32_t)u32Disp;
4445 break;
4446 }
4447 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
4448 }
4449
4450 }
4451 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
4452 *pGCPtrEff = u64EffAddr;
4453 else
4454 *pGCPtrEff = u64EffAddr & UINT32_MAX; /* the 0x67 prefix gives 32-bit addressing in 64-bit mode, never 16-bit */
4455 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
4456 return VINF_SUCCESS;
4457 }
4458 }
4459
4460 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4461}
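/* Worked example for the 16-bit path above (values illustrative): bRm=0x42
   decodes as mod=01, r/m=010, i.e. [bp+si+disp8] with SS as the default
   segment; with disp8=0x10, BP=0x0100 and SI=0x0020 the effective address
   is 0x0100 + 0x0020 + 0x10 = 0x0130. */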
4462
4463/** @} */
4464
4465
4466
4467/*
4468 * Include the instructions
4469 */
4470#include "IEMAllInstructions.cpp.h"
4471
4472
4473
4474
4475#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
4476
4477/**
4478 * Sets up execution verification mode.
4479 */
4480static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
4481{
4482 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
4483 pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */
4484
4485#if 0
4486 // Auto enable; DSL.
4487 if ( pIemCpu->fNoRem
4488 && pOrgCtx->cs == 0x10
4489 && ( pOrgCtx->rip == 0x00100fc7
4490 || pOrgCtx->rip == 0x00100ffc
4491 || pOrgCtx->rip == 0x00100ffe
4492 )
4493 )
4494 {
4495 RTLogFlags(NULL, "enabled");
4496 pIemCpu->fNoRem = false;
4497 }
4498#endif
4499
4500 /*
4501 * Switch state.
4502 */
4503 if (IEM_VERIFICATION_ENABLED(pIemCpu))
4504 {
4505 static CPUMCTX s_DebugCtx; /* Ugly! */
4506
4507 s_DebugCtx = *pOrgCtx;
4508 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
4509 }
4510
4511 /*
4512 * See if there is an interrupt pending in TRPM and inject it if we can.
4513 */
4514 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4515 if ( pOrgCtx->eflags.Bits.u1IF
4516 && TRPMHasTrap(pVCpu)
4517 //&& TRPMIsSoftwareInterrupt(pVCpu)
4518 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
4519 {
4520 Log(("Injecting trap %#x\n", TRPMGetTrapNo(pVCpu)));
4521 iemCImpl_int(pIemCpu, 0, TRPMGetTrapNo(pVCpu), false);
4522 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4523 TRPMResetTrap(pVCpu);
4524 }
4525
4526 /*
4527 * Reset the counters.
4528 */
4529 pIemCpu->cIOReads = 0;
4530 pIemCpu->cIOWrites = 0;
4531 pIemCpu->fUndefinedEFlags = 0;
4532
4533 if (IEM_VERIFICATION_ENABLED(pIemCpu))
4534 {
4535 /*
4536 * Free all verification records.
4537 */
4538 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
4539 pIemCpu->pIemEvtRecHead = NULL;
4540 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
4541 do
4542 {
4543 while (pEvtRec)
4544 {
4545 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
4546 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
4547 pIemCpu->pFreeEvtRec = pEvtRec;
4548 pEvtRec = pNext;
4549 }
4550 pEvtRec = pIemCpu->pOtherEvtRecHead;
4551 pIemCpu->pOtherEvtRecHead = NULL;
4552 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
4553 } while (pEvtRec);
4554 }
4555}
4556
4557
4558/**
4559 * Allocate an event record.
4560 * @returns Pointer to a record, or NULL if verification is disabled or allocation fails.
4561 */
4562static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
4563{
4564 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4565 return NULL;
4566
4567 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
4568 if (pEvtRec)
4569 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
4570 else
4571 {
4572 if (!pIemCpu->ppIemEvtRecNext)
4573 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
4574
4575 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
4576 if (!pEvtRec)
4577 return NULL;
4578 }
4579 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
4580 pEvtRec->pNext = NULL;
4581 return pEvtRec;
4582}
4583
4584
4585/**
4586 * IOMMMIORead notification.
4587 */
4588VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
4589{
4590 PVMCPU pVCpu = VMMGetCpu(pVM);
4591 if (!pVCpu)
4592 return;
4593 PIEMCPU pIemCpu = &pVCpu->iem.s;
4594 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4595 if (!pEvtRec)
4596 return;
4597 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4598 pEvtRec->u.RamRead.GCPhys = GCPhys;
4599 pEvtRec->u.RamRead.cb = cbValue;
4600 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4601 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4602}
4603
4604
4605/**
4606 * IOMMMIOWrite notification.
4607 */
4608VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
4609{
4610 PVMCPU pVCpu = VMMGetCpu(pVM);
4611 if (!pVCpu)
4612 return;
4613 PIEMCPU pIemCpu = &pVCpu->iem.s;
4614 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4615 if (!pEvtRec)
4616 return;
4617 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4618 pEvtRec->u.RamWrite.GCPhys = GCPhys;
4619 pEvtRec->u.RamWrite.cb = cbValue;
4620 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
4621 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
4622 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
4623 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
4624 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4625 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4626}
4627
4628
4629/**
4630 * IOMIOPortRead notification.
4631 */
4632VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
4633{
4634 PVMCPU pVCpu = VMMGetCpu(pVM);
4635 if (!pVCpu)
4636 return;
4637 PIEMCPU pIemCpu = &pVCpu->iem.s;
4638 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4639 if (!pEvtRec)
4640 return;
4641 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
4642 pEvtRec->u.IOPortRead.Port = Port;
4643 pEvtRec->u.IOPortRead.cbValue = cbValue;
4644 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4645 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4646}
4647
4648/**
4649 * IOMIOPortWrite notification.
4650 */
4651VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
4652{
4653 PVMCPU pVCpu = VMMGetCpu(pVM);
4654 if (!pVCpu)
4655 return;
4656 PIEMCPU pIemCpu = &pVCpu->iem.s;
4657 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4658 if (!pEvtRec)
4659 return;
4660 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
4661 pEvtRec->u.IOPortWrite.Port = Port;
4662 pEvtRec->u.IOPortWrite.cbValue = cbValue;
4663 pEvtRec->u.IOPortWrite.u32Value = u32Value;
4664 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
4665 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
4666}
4667
4668
4669VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
4670{
4671 AssertFailed();
4672}
4673
4674
4675VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
4676{
4677 AssertFailed();
4678}
4679
4680
4681/**
4682 * Fakes and records an I/O port read.
4683 *
4684 * @returns VINF_SUCCESS.
4685 * @param pIemCpu The IEM per CPU data.
4686 * @param Port The I/O port.
4687 * @param pu32Value Where to store the fake value.
4688 * @param cbValue The size of the access.
4689 */
4690static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
4691{
4692 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4693 if (pEvtRec)
4694 {
4695 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
4696 pEvtRec->u.IOPortRead.Port = Port;
4697 pEvtRec->u.IOPortRead.cbValue = cbValue;
4698 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4699 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4700 }
4701 pIemCpu->cIOReads++;
4702 *pu32Value = 0xffffffff;
4703 return VINF_SUCCESS;
4704}
4705
4706
4707/**
4708 * Fakes and records an I/O port write.
4709 *
4710 * @returns VINF_SUCCESS.
4711 * @param pIemCpu The IEM per CPU data.
4712 * @param Port The I/O port.
4713 * @param u32Value The value being written.
4714 * @param cbValue The size of the access.
4715 */
4716static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
4717{
4718 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4719 if (pEvtRec)
4720 {
4721 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
4722 pEvtRec->u.IOPortWrite.Port = Port;
4723 pEvtRec->u.IOPortWrite.cbValue = cbValue;
4724 pEvtRec->u.IOPortWrite.u32Value = u32Value;
4725 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4726 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4727 }
4728 pIemCpu->cIOWrites++;
4729 return VINF_SUCCESS;
4730}
4731
4732
4733/**
4734 * Used to add extra details about a stub case.
4735 * @param pIemCpu The IEM per CPU state.
4736 */
4737static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
4738{
4739 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4740 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4741 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4742 char szRegs[4096];
4743 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4744 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4745 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4746 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4747 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4748 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4749 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4750 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4751 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4752 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4753 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4754 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4755 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4756 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4757 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4758 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4759 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4760 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4761 " efer=%016VR{efer}\n"
4762 " pat=%016VR{pat}\n"
4763 " sf_mask=%016VR{sf_mask}\n"
4764 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4765 " lstar=%016VR{lstar}\n"
4766 " star=%016VR{star} cstar=%016VR{cstar}\n"
4767 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4768 );
4769
4770 char szInstr1[256];
4771 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
4772 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4773 szInstr1, sizeof(szInstr1), NULL);
4774 char szInstr2[256];
4775 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
4776 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4777 szInstr2, sizeof(szInstr2), NULL);
4778
4779 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
4780}
4781
4782
4783/**
4784 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
4785 * dump to the assertion info.
4786 *
4787 * @param pEvtRec The record to dump.
4788 */
4789static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
4790{
4791 switch (pEvtRec->enmEvent)
4792 {
4793 case IEMVERIFYEVENT_IOPORT_READ:
4794 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
4795 pEvtRec->u.IOPortRead.Port,
4796 pEvtRec->u.IOPortRead.cbValue);
4797 break;
4798 case IEMVERIFYEVENT_IOPORT_WRITE:
4799 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
4800 pEvtRec->u.IOPortWrite.Port,
4801 pEvtRec->u.IOPortWrite.cbValue,
4802 pEvtRec->u.IOPortWrite.u32Value);
4803 break;
4804 case IEMVERIFYEVENT_RAM_READ:
4805 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
4806 pEvtRec->u.RamRead.GCPhys,
4807 pEvtRec->u.RamRead.cb);
4808 break;
4809 case IEMVERIFYEVENT_RAM_WRITE:
4810 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
4811 pEvtRec->u.RamWrite.GCPhys,
4812 pEvtRec->u.RamWrite.cb,
4813 (int)pEvtRec->u.RamWrite.cb,
4814 pEvtRec->u.RamWrite.ab);
4815 break;
4816 default:
4817 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
4818 break;
4819 }
4820}
4821
4822
4823/**
4824 * Raises an assertion on the specified records, showing the given message with
4825 * a record dump attached.
4826 *
4827 * @param pIemCpu The IEM per CPU data.
4828 * @param pEvtRec1 The first record.
4829 * @param pEvtRec2 The second record.
4830 * @param pszMsg The message explaining why we're asserting.
4831 */
4832static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
4833{
4834 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
4835 iemVerifyAssertAddRecordDump(pEvtRec1);
4836 iemVerifyAssertAddRecordDump(pEvtRec2);
4837 iemVerifyAssertMsg2(pIemCpu);
4838 RTAssertPanic();
4839}
4840
4841
4842/**
4843 * Raises an assertion on the specified record, showing the given message with
4844 * a record dump attached.
4845 *
4846 * @param pIemCpu The IEM per CPU data.
4847 * @param pEvtRec The record.
4848 * @param pszMsg The message explaining why we're asserting.
4849 */
4850static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
4851{
4852 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
4853 iemVerifyAssertAddRecordDump(pEvtRec);
4854 iemVerifyAssertMsg2(pIemCpu);
4855 RTAssertPanic();
4856}
4857
4858
4859/**
4860 * Verifies a write record.
4861 *
4862 * @param pIemCpu The IEM per CPU data.
4863 * @param pEvtRec The write record.
4864 */
4865static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
4866{
4867 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
4868 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
4869 if ( RT_FAILURE(rc)
4870 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
4871 {
4872 /* fend off ins */
4873 if ( !pIemCpu->cIOReads
4874 || pEvtRec->u.RamWrite.ab[0] != 0xcc
4875 || ( pEvtRec->u.RamWrite.cb != 1
4876 && pEvtRec->u.RamWrite.cb != 2
4877 && pEvtRec->u.RamWrite.cb != 4) )
4878 {
4879 /* fend off ROMs */
4880 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
4881 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
4882 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
4883 {
4884 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
4885 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
4886 RTAssertMsg2Add("REM: %.*Rhxs\n"
4887 "IEM: %.*Rhxs\n",
4888 pEvtRec->u.RamWrite.cb, abBuf,
4889 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
4890 iemVerifyAssertAddRecordDump(pEvtRec);
4891 iemVerifyAssertMsg2(pIemCpu);
4892 RTAssertPanic();
4893 }
4894 }
4895 }
4896
4897}
4898
4899/**
4900 * Performs the post-execution verification checks.
4901 */
4902static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
4903{
4904 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4905 return;
4906
4907 /*
4908 * Switch back the state.
4909 */
4910 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
4911 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
4912 Assert(pOrgCtx != pDebugCtx);
4913 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
4914
4915 /*
4916 * Execute the instruction in REM.
4917 */
4918 int rc = REMR3EmulateInstruction(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu));
4919 AssertRC(rc);
4920
4921 /*
4922 * Compare the register states.
4923 */
4924 unsigned cDiffs = 0;
4925 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
4926 {
4927 Log(("REM and IEM ends up with different registers!\n"));
4928
4929# define CHECK_FIELD(a_Field) \
4930 do \
4931 { \
4932 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
4933 { \
4934 switch (sizeof(pOrgCtx->a_Field)) \
4935 { \
4936 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4937 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4938 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4939 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
4940 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
4941 } \
4942 cDiffs++; \
4943 } \
4944 } while (0)
4945
4946# define CHECK_BIT_FIELD(a_Field) \
4947 do \
4948 { \
4949 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
4950 { \
4951 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
4952 cDiffs++; \
4953 } \
4954 } while (0)
4955
4956# define CHECK_SEL(a_Sel) \
4957 do \
4958 { \
4959 CHECK_FIELD(a_Sel); \
4960 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
4961 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
4962 { \
4963 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
4964 cDiffs++; \
4965 } \
4966 CHECK_FIELD(a_Sel##Hid.u64Base); \
4967 CHECK_FIELD(a_Sel##Hid.u32Limit); \
4968 } while (0)

        if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
        {
            if (pIemCpu->cInstructions != 1)
            {
                RTAssertMsg2Weak("  the FPU state differs\n");
                cDiffs++;
                CHECK_FIELD(fpu.FCW);
                CHECK_FIELD(fpu.FSW);
                CHECK_FIELD(fpu.FTW);
                CHECK_FIELD(fpu.FOP);
                CHECK_FIELD(fpu.FPUIP);
                CHECK_FIELD(fpu.CS);
                CHECK_FIELD(fpu.Rsrvd1);
                CHECK_FIELD(fpu.FPUDP);
                CHECK_FIELD(fpu.DS);
                CHECK_FIELD(fpu.Rsrvd2);
                CHECK_FIELD(fpu.MXCSR);
                CHECK_FIELD(fpu.MXCSR_MASK);
                CHECK_FIELD(fpu.aRegs[0].au64[0]);  CHECK_FIELD(fpu.aRegs[0].au64[1]);
                CHECK_FIELD(fpu.aRegs[1].au64[0]);  CHECK_FIELD(fpu.aRegs[1].au64[1]);
                CHECK_FIELD(fpu.aRegs[2].au64[0]);  CHECK_FIELD(fpu.aRegs[2].au64[1]);
                CHECK_FIELD(fpu.aRegs[3].au64[0]);  CHECK_FIELD(fpu.aRegs[3].au64[1]);
                CHECK_FIELD(fpu.aRegs[4].au64[0]);  CHECK_FIELD(fpu.aRegs[4].au64[1]);
                CHECK_FIELD(fpu.aRegs[5].au64[0]);  CHECK_FIELD(fpu.aRegs[5].au64[1]);
                CHECK_FIELD(fpu.aRegs[6].au64[0]);  CHECK_FIELD(fpu.aRegs[6].au64[1]);
                CHECK_FIELD(fpu.aRegs[7].au64[0]);  CHECK_FIELD(fpu.aRegs[7].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 0].au64[0]);  CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 1].au64[0]);  CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 2].au64[0]);  CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 3].au64[0]);  CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 4].au64[0]);  CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 5].au64[0]);  CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 6].au64[0]);  CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 7].au64[0]);  CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 8].au64[0]);  CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
                CHECK_FIELD(fpu.aXMM[ 9].au64[0]);  CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
                CHECK_FIELD(fpu.aXMM[10].au64[0]);  CHECK_FIELD(fpu.aXMM[10].au64[1]);
                CHECK_FIELD(fpu.aXMM[11].au64[0]);  CHECK_FIELD(fpu.aXMM[11].au64[1]);
                CHECK_FIELD(fpu.aXMM[12].au64[0]);  CHECK_FIELD(fpu.aXMM[12].au64[1]);
                CHECK_FIELD(fpu.aXMM[13].au64[0]);  CHECK_FIELD(fpu.aXMM[13].au64[1]);
                CHECK_FIELD(fpu.aXMM[14].au64[0]);  CHECK_FIELD(fpu.aXMM[14].au64[1]);
                CHECK_FIELD(fpu.aXMM[15].au64[0]);  CHECK_FIELD(fpu.aXMM[15].au64[1]);
                for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
                    CHECK_FIELD(fpu.au32RsrvdRest[i]);
            }
            else
                RTAssertMsg2Weak("  the FPU state differs - expected on the very first instruction, ignoring\n");
        }
        CHECK_FIELD(rip);
        uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
        if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
        {
            RTAssertMsg2Weak("  rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
            CHECK_BIT_FIELD(rflags.Bits.u1CF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
            CHECK_BIT_FIELD(rflags.Bits.u1PF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
            CHECK_BIT_FIELD(rflags.Bits.u1AF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
            CHECK_BIT_FIELD(rflags.Bits.u1ZF);
            CHECK_BIT_FIELD(rflags.Bits.u1SF);
            CHECK_BIT_FIELD(rflags.Bits.u1TF);
            CHECK_BIT_FIELD(rflags.Bits.u1IF);
            CHECK_BIT_FIELD(rflags.Bits.u1DF);
            CHECK_BIT_FIELD(rflags.Bits.u1OF);
            CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
            CHECK_BIT_FIELD(rflags.Bits.u1NT);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
            CHECK_BIT_FIELD(rflags.Bits.u1RF);
            CHECK_BIT_FIELD(rflags.Bits.u1VM);
            CHECK_BIT_FIELD(rflags.Bits.u1AC);
            CHECK_BIT_FIELD(rflags.Bits.u1VIF);
            CHECK_BIT_FIELD(rflags.Bits.u1VIP);
            CHECK_BIT_FIELD(rflags.Bits.u1ID);
        }

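        /* When the instruction performed an I/O port read, the value IEM got
           back via its fake handler and the value REM read from the actual
           device may legitimately differ, so skip the rax check then. */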
        if (pIemCpu->cIOReads != 1)
            CHECK_FIELD(rax);
        CHECK_FIELD(rcx);
        CHECK_FIELD(rdx);
        CHECK_FIELD(rbx);
        CHECK_FIELD(rsp);
        CHECK_FIELD(rbp);
        CHECK_FIELD(rsi);
        CHECK_FIELD(rdi);
        CHECK_FIELD(r8);
        CHECK_FIELD(r9);
        CHECK_FIELD(r10);
        CHECK_FIELD(r11);
        CHECK_FIELD(r12);
        CHECK_FIELD(r13);
        CHECK_FIELD(r14);
        CHECK_FIELD(r15);
        CHECK_SEL(cs);
        CHECK_SEL(ss);
        CHECK_SEL(ds);
        CHECK_SEL(es);
        CHECK_SEL(fs);
        CHECK_SEL(gs);
        CHECK_FIELD(cr0);
        CHECK_FIELD(cr2);
        CHECK_FIELD(cr3);
        CHECK_FIELD(cr4);
        CHECK_FIELD(dr[0]);
        CHECK_FIELD(dr[1]);
        CHECK_FIELD(dr[2]);
        CHECK_FIELD(dr[3]);
        CHECK_FIELD(dr[6]);
        CHECK_FIELD(dr[7]);
        CHECK_FIELD(gdtr.cbGdt);
        CHECK_FIELD(gdtr.pGdt);
        CHECK_FIELD(idtr.cbIdt);
        CHECK_FIELD(idtr.pIdt);
        CHECK_FIELD(ldtr);
        CHECK_FIELD(ldtrHid.u64Base);
        CHECK_FIELD(ldtrHid.u32Limit);
        CHECK_FIELD(ldtrHid.Attr.u);
        CHECK_FIELD(tr);
        CHECK_FIELD(trHid.u64Base);
        CHECK_FIELD(trHid.u32Limit);
        CHECK_FIELD(trHid.Attr.u);
        CHECK_FIELD(SysEnter.cs);
        CHECK_FIELD(SysEnter.eip);
        CHECK_FIELD(SysEnter.esp);
        CHECK_FIELD(msrEFER);
        CHECK_FIELD(msrSTAR);
        CHECK_FIELD(msrPAT);
        CHECK_FIELD(msrLSTAR);
        CHECK_FIELD(msrCSTAR);
        CHECK_FIELD(msrSFMASK);
        CHECK_FIELD(msrKERNELGSBASE);

        if (cDiffs != 0)
        {
            RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
            iemVerifyAssertMsg2(pIemCpu);
            RTAssertPanic();
        }
# undef CHECK_FIELD
# undef CHECK_BIT_FIELD
    }

    /*
     * If the register state compared fine, check the verification event
     * records.
     */
    if (cDiffs == 0)
    {
        /*
         * Compare verification event records.
         *  - I/O port accesses should be a 1:1 match.
         */
        PIEMVERIFYEVTREC pIemRec   = pIemCpu->pIemEvtRecHead;
        PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
        while (pIemRec && pOtherRec)
        {
            /* Since we might miss RAM writes and reads, ignore extra IEM-only
               RAM reads and verify extra IEM-only writes directly against
               guest memory rather than against a matching record. */
            while (   IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
                   && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
                   && pIemRec->pNext)
            {
                if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
                    iemVerifyWriteRecord(pIemCpu, pIemRec);
                pIemRec = pIemRec->pNext;
            }

            /* Do the compare. */
            if (pIemRec->enmEvent != pOtherRec->enmEvent)
            {
                iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
                break;
            }
            bool fEquals;
            switch (pIemRec->enmEvent)
            {
                case IEMVERIFYEVENT_IOPORT_READ:
                    fEquals = pIemRec->u.IOPortRead.Port      == pOtherRec->u.IOPortRead.Port
                           && pIemRec->u.IOPortRead.cbValue   == pOtherRec->u.IOPortRead.cbValue;
                    break;
                case IEMVERIFYEVENT_IOPORT_WRITE:
                    fEquals = pIemRec->u.IOPortWrite.Port     == pOtherRec->u.IOPortWrite.Port
                           && pIemRec->u.IOPortWrite.cbValue  == pOtherRec->u.IOPortWrite.cbValue
                           && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
                    break;
                case IEMVERIFYEVENT_RAM_READ:
                    fEquals = pIemRec->u.RamRead.GCPhys       == pOtherRec->u.RamRead.GCPhys
                           && pIemRec->u.RamRead.cb           == pOtherRec->u.RamRead.cb;
                    break;
                case IEMVERIFYEVENT_RAM_WRITE:
                    fEquals = pIemRec->u.RamWrite.GCPhys      == pOtherRec->u.RamWrite.GCPhys
                           && pIemRec->u.RamWrite.cb          == pOtherRec->u.RamWrite.cb
                           && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
                    break;
                default:
                    fEquals = false;
                    break;
            }
            if (!fEquals)
            {
                iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
                break;
            }

            /* advance */
            pIemRec   = pIemRec->pNext;
            pOtherRec = pOtherRec->pNext;
        }

        /* Ignore extra writes and reads. */
        while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
        {
            if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
                iemVerifyWriteRecord(pIemCpu, pIemRec);
            pIemRec = pIemRec->pNext;
        }
        if (pIemRec != NULL)
            iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
        else if (pOtherRec != NULL)
            iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
    }
    pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;

    /*
     * HACK ALERT! You don't normally want to verify a whole boot sequence,
     * so disable logging after the first instruction to keep the log usable.
     */
    if (pIemCpu->cInstructions == 1)
        RTLogFlags(NULL, "disabled");
}

#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */

/* Stubs for the fake I/O port handlers; these are only reachable in
   verification mode, so outside it they simply fail hard. */
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
    return VERR_INTERNAL_ERROR;
}

static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
{
    return VERR_INTERNAL_ERROR;
}

#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */


/**
 * Execute one instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The current virtual CPU.
 */
VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
{
    PIEMCPU pIemCpu = &pVCpu->iem.s;

#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    iemExecVerificationModeSetup(pIemCpu);
#endif
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    if (LogIs2Enabled())
    {
        char     szInstr[256];
        uint32_t cbInstr = 0;
        DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), &cbInstr);

        Log2(("**** "
              " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
              " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
              " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
              " %s\n"
              ,
              pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
              pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
              (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
              (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
              szInstr));
    }
#endif

    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

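    /* Fetch the first opcode byte and dispatch through the one-byte opcode
       decoder table; prefixes and escape bytes are handled inside the
       decoder functions themselves. */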
    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->cInstructions++;
//#ifdef DEBUG
//    AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
//#endif

    /* Execute the next instruction as well if a cli, pop ss or
       mov ss, Gr has just completed successfully, so that an interrupt
       cannot be delivered inside the one-instruction shadow they create. */
    if (   rcStrict == VINF_SUCCESS
        && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
        && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
    {
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
        if (rcStrict == VINF_SUCCESS)
        {
            IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
            rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
            if (rcStrict == VINF_SUCCESS)
                pIemCpu->cInstructions++;
        }
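        /* Setting the recorded PC to an address the guest RIP can never
           match effectively clears the interrupt inhibition. */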
        EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
    }

    /*
     * Assert some sanity.
     */
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    iemExecVerificationModeCheck(pIemCpu);
#endif
    return rcStrict;
}
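
/*
 * Usage sketch (editor's illustration, not code from this file): a ring-3
 * caller holding the relevant VMCPU would typically drive the interpreter
 * like this; the surrounding EM glue is an assumption.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // let the caller handle informational/error statuses
 */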