VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 37084

Last change on this file since 37084 was 37084, checked in by vboxsync, 14 years ago

IEM: xadd

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 220.6 KB
1/* $Id: IEMAll.cpp 37084 2011-05-13 19:53:02Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 */
43
44/*******************************************************************************
45* Header Files *
46*******************************************************************************/
47#define LOG_GROUP LOG_GROUP_IEM
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/pgm.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/em.h>
52#include <VBox/vmm/tm.h>
53#include <VBox/vmm/dbgf.h>
54#ifdef IEM_VERIFICATION_MODE
55# include <VBox/vmm/rem.h>
56# include <VBox/vmm/mm.h>
57#endif
58#include "IEMInternal.h"
59#include <VBox/vmm/vm.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/x86.h>
64#include <iprt/assert.h>
65#include <iprt/string.h>
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/**
72 * Generic pointer union.
73 * @todo move me to iprt/types.h
74 */
75typedef union RTPTRUNION
76{
77 /** Pointer into the void... */
78 void *pv;
79 /** Pointer to an 8-bit unsigned value. */
80 uint8_t *pu8;
81 /** Pointer to a 16-bit unsigned value. */
82 uint16_t *pu16;
83 /** Pointer to a 32-bit unsigned value. */
84 uint32_t *pu32;
85 /** Pointer to a 64-bit unsigned value. */
86 uint64_t *pu64;
87} RTPTRUNION;
88/** Pointer to a pointer union. */
89typedef RTPTRUNION *PRTPTRUNION;
90
91/**
92 * Generic const pointer union.
93 * @todo move me to iprt/types.h
94 */
95typedef union RTCPTRUNION
96{
97 /** Pointer into the void... */
98 void const *pv;
99 /** Pointer to an 8-bit unsigned value. */
100 uint8_t const *pu8;
101 /** Pointer to a 16-bit unsigned value. */
102 uint16_t const *pu16;
103 /** Pointer to a 32-bit unsigned value. */
104 uint32_t const *pu32;
105 /** Pointer to a 64-bit unsigned value. */
106 uint64_t const *pu64;
107} RTCPTRUNION;
108/** Pointer to a const pointer union. */
109typedef RTCPTRUNION *PRTCPTRUNION;
110
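/* Illustrative sketch (not from the original source): the pointer unions above
 * let one buffer be written through differently sized lanes, which is how the
 * stack-frame code further down in this file pushes mixed-size values. A
 * minimal, hypothetical use:
 *
 *     uint8_t     abFrame[8];
 *     RTPTRUNION  uPtr;
 *     uPtr.pv = abFrame;
 *     *uPtr.pu32++ = UINT32_C(0xdeadbeef);    - 32-bit store, then advance
 *     *uPtr.pu16++ = UINT16_C(0xcafe);        - 16-bit store right after it
 */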
111/** @typedef PFNIEMOP
112 * Pointer to an opcode decoder function.
113 */
114
115/** @def FNIEMOP_DEF
116 * Define an opcode decoder function.
117 *
118 * We're using macros for this so that adding and removing parameters as well as
119 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
120 *
121 * @param a_Name The function name.
122 */
123
124
125#if defined(__GNUC__) && defined(RT_ARCH_X86)
126typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
127# define FNIEMOP_DEF(a_Name) \
128 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
129# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
130 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
131# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
132 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
133
134#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
135typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
136# define FNIEMOP_DEF(a_Name) \
137 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
138# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
139 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
140# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
141 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
142
143#elif defined(__GNUC__)
144typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
145# define FNIEMOP_DEF(a_Name) \
146 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
147# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
148 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
149# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
150 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
151
152#else
153typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
154# define FNIEMOP_DEF(a_Name) \
155 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
156# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
157 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
158# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
159 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
160
161#endif
162
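/* Illustrative sketch: with the variants above a decoder function is always
 * declared through the macro so the calling convention lives in one place.
 * A hypothetical stub (the function name below is made up) would look like:
 *
 *     FNIEMOP_DEF(iemOp_Illustrative_nop)
 *     {
 *         return VINF_SUCCESS;     - real decoders fetch further opcode bytes
 *     }                              and defer to C/assembly workers here
 */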
163
164/**
165 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
166 */
167typedef union IEMSELDESC
168{
169 /** The legacy view. */
170 X86DESC Legacy;
171 /** The long mode view. */
172 X86DESC64 Long;
173} IEMSELDESC;
174/** Pointer to a selector descriptor table entry. */
175typedef IEMSELDESC *PIEMSELDESC;
176
177
178/*******************************************************************************
179* Defined Constants And Macros *
180*******************************************************************************/
181/** @name IEM status codes.
182 *
183 * Not quite sure how this will play out in the end, just aliasing safe status
184 * codes for now.
185 *
186 * @{ */
187#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
188/** @} */
189
190/** Temporary hack to disable the double execution. Will be removed in favor
191 * of a dedicated execution mode in EM. */
192//#define IEM_VERIFICATION_MODE_NO_REM
193
194/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
195 * due to GCC lacking knowledge about the value range of a switch. */
196#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_INTERNAL_ERROR_4)
197
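/* Illustrative usage sketch of the macro above (cbValue is a made-up local;
 * the pattern shows up further down whenever a switch covers all enum values):
 *
 *     uint8_t cbValue;
 *     switch (pIemCpu->enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */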
198/**
199 * Call an opcode decoder function.
200 *
201 * We're using macros for this so that adding and removing parameters can be
202 * done as we please. See FNIEMOP_DEF.
203 */
204#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
205
206/**
207 * Call a common opcode decoder function taking one extra argument.
208 *
209 * We're using macros for this so that adding and removing parameters can be
210 * done as we please. See FNIEMOP_DEF_1.
211 */
212#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
213
214/**
215 * Call a common opcode decoder function taking two extra arguments.
216 *
217 * We're using macros for this so that adding and removing parameters can be
218 * done as we please. See FNIEMOP_DEF_2.
219 */
220#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
221
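/* Hedged dispatch sketch: the call macros keep the decoder tables generic, so
 * fetching one opcode byte and jumping through the one-byte map declared below
 * would look roughly like this (only g_apfnOneByteMap is a real name here):
 *
 *     uint8_t bOpcode;
 *     IEM_OPCODE_GET_NEXT_U8(&bOpcode);
 *     return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */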
222/**
223 * Check if we're currently executing in real or virtual 8086 mode.
224 *
225 * @returns @c true if it is, @c false if not.
226 * @param a_pIemCpu The IEM state of the current CPU.
227 */
228#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
229
230/**
231 * Check if we're currently executing in long mode.
232 *
233 * @returns @c true if it is, @c false if not.
234 * @param a_pIemCpu The IEM state of the current CPU.
235 */
236#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
237
238/**
239 * Check if we're currently executing in real mode.
240 *
241 * @returns @c true if it is, @c false if not.
242 * @param a_pIemCpu The IEM state of the current CPU.
243 */
244#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
245
246/**
247 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
248 */
249#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
250
251/**
252 * Checks if an Intel CPUID feature is present.
253 */
254#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
255 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
256 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
257
258/**
259 * Check if the address is canonical.
260 */
261#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
262
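/* Worked example for the check above: adding the 0x800000000000 bias folds
 * both canonical ranges into one unsigned compare. 0x00007fffffffffff + bias =
 * 0x0000ffffffffffff (below 2^48, accepted); 0xffff800000000000 + bias wraps
 * around to 0 (accepted); the non-canonical 0x0000800000000000 + bias equals
 * exactly 2^48 and is rejected. */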
263
264/*******************************************************************************
265* Global Variables *
266*******************************************************************************/
267extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
268
269
270/** Function table for the ADD instruction. */
271static const IEMOPBINSIZES g_iemAImpl_add =
272{
273 iemAImpl_add_u8, iemAImpl_add_u8_locked,
274 iemAImpl_add_u16, iemAImpl_add_u16_locked,
275 iemAImpl_add_u32, iemAImpl_add_u32_locked,
276 iemAImpl_add_u64, iemAImpl_add_u64_locked
277};
278
279/** Function table for the ADC instruction. */
280static const IEMOPBINSIZES g_iemAImpl_adc =
281{
282 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
283 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
284 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
285 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
286};
287
288/** Function table for the SUB instruction. */
289static const IEMOPBINSIZES g_iemAImpl_sub =
290{
291 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
292 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
293 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
294 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
295};
296
297/** Function table for the SBB instruction. */
298static const IEMOPBINSIZES g_iemAImpl_sbb =
299{
300 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
301 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
302 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
303 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
304};
305
306/** Function table for the OR instruction. */
307static const IEMOPBINSIZES g_iemAImpl_or =
308{
309 iemAImpl_or_u8, iemAImpl_or_u8_locked,
310 iemAImpl_or_u16, iemAImpl_or_u16_locked,
311 iemAImpl_or_u32, iemAImpl_or_u32_locked,
312 iemAImpl_or_u64, iemAImpl_or_u64_locked
313};
314
315/** Function table for the XOR instruction. */
316static const IEMOPBINSIZES g_iemAImpl_xor =
317{
318 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
319 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
320 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
321 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
322};
323
324/** Function table for the AND instruction. */
325static const IEMOPBINSIZES g_iemAImpl_and =
326{
327 iemAImpl_and_u8, iemAImpl_and_u8_locked,
328 iemAImpl_and_u16, iemAImpl_and_u16_locked,
329 iemAImpl_and_u32, iemAImpl_and_u32_locked,
330 iemAImpl_and_u64, iemAImpl_and_u64_locked
331};
332
333/** Function table for the CMP instruction.
334 * @remarks Making operand order ASSUMPTIONS.
335 */
336static const IEMOPBINSIZES g_iemAImpl_cmp =
337{
338 iemAImpl_cmp_u8, NULL,
339 iemAImpl_cmp_u16, NULL,
340 iemAImpl_cmp_u32, NULL,
341 iemAImpl_cmp_u64, NULL
342};
343
344/** Function table for the TEST instruction.
345 * @remarks Making operand order ASSUMPTIONS.
346 */
347static const IEMOPBINSIZES g_iemAImpl_test =
348{
349 iemAImpl_test_u8, NULL,
350 iemAImpl_test_u16, NULL,
351 iemAImpl_test_u32, NULL,
352 iemAImpl_test_u64, NULL
353};
354
355/** Function table for the BT instruction. */
356static const IEMOPBINSIZES g_iemAImpl_bt =
357{
358 NULL, NULL,
359 iemAImpl_bt_u16, NULL,
360 iemAImpl_bt_u32, NULL,
361 iemAImpl_bt_u64, NULL
362};
363
364/** Function table for the BTC instruction. */
365static const IEMOPBINSIZES g_iemAImpl_btc =
366{
367 NULL, NULL,
368 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
369 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
370 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
371};
372
373/** Function table for the BTR instruction. */
374static const IEMOPBINSIZES g_iemAImpl_btr =
375{
376 NULL, NULL,
377 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
378 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
379 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
380};
381
382/** Function table for the BTS instruction. */
383static const IEMOPBINSIZES g_iemAImpl_bts =
384{
385 NULL, NULL,
386 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
387 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
388 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
389};
390
391/** Function table for the BSF instruction. */
392static const IEMOPBINSIZES g_iemAImpl_bsf =
393{
394 NULL, NULL,
395 iemAImpl_bsf_u16, NULL,
396 iemAImpl_bsf_u32, NULL,
397 iemAImpl_bsf_u64, NULL
398};
399
400/** Function table for the BSR instruction. */
401static const IEMOPBINSIZES g_iemAImpl_bsr =
402{
403 NULL, NULL,
404 iemAImpl_bsr_u16, NULL,
405 iemAImpl_bsr_u32, NULL,
406 iemAImpl_bsr_u64, NULL
407};
408
409/** Function table for the IMUL instruction. */
410static const IEMOPBINSIZES g_iemAImpl_imul_two =
411{
412 NULL, NULL,
413 iemAImpl_imul_two_u16, NULL,
414 iemAImpl_imul_two_u32, NULL,
415 iemAImpl_imul_two_u64, NULL
416};
417
418/** Group 1 /r lookup table. */
419static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
420{
421 &g_iemAImpl_add,
422 &g_iemAImpl_or,
423 &g_iemAImpl_adc,
424 &g_iemAImpl_sbb,
425 &g_iemAImpl_and,
426 &g_iemAImpl_sub,
427 &g_iemAImpl_xor,
428 &g_iemAImpl_cmp
429};
430
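/* Illustrative sketch: the table above is indexed by the /r (reg) field of the
 * ModR/M byte, i.e. bits 3 thru 5, so a group-1 decoder picks its worker
 * roughly like this (bRm standing for an already fetched ModR/M byte):
 *
 *     PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */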
431/** Function table for the INC instruction. */
432static const IEMOPUNARYSIZES g_iemAImpl_inc =
433{
434 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
435 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
436 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
437 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
438};
439
440/** Function table for the DEC instruction. */
441static const IEMOPUNARYSIZES g_iemAImpl_dec =
442{
443 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
444 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
445 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
446 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
447};
448
449/** Function table for the NEG instruction. */
450static const IEMOPUNARYSIZES g_iemAImpl_neg =
451{
452 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
453 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
454 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
455 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
456};
457
458/** Function table for the NOT instruction. */
459static const IEMOPUNARYSIZES g_iemAImpl_not =
460{
461 iemAImpl_not_u8, iemAImpl_not_u8_locked,
462 iemAImpl_not_u16, iemAImpl_not_u16_locked,
463 iemAImpl_not_u32, iemAImpl_not_u32_locked,
464 iemAImpl_not_u64, iemAImpl_not_u64_locked
465};
466
467
468/** Function table for the ROL instruction. */
469static const IEMOPSHIFTSIZES g_iemAImpl_rol =
470{
471 iemAImpl_rol_u8,
472 iemAImpl_rol_u16,
473 iemAImpl_rol_u32,
474 iemAImpl_rol_u64
475};
476
477/** Function table for the ROR instruction. */
478static const IEMOPSHIFTSIZES g_iemAImpl_ror =
479{
480 iemAImpl_ror_u8,
481 iemAImpl_ror_u16,
482 iemAImpl_ror_u32,
483 iemAImpl_ror_u64
484};
485
486/** Function table for the RCL instruction. */
487static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
488{
489 iemAImpl_rcl_u8,
490 iemAImpl_rcl_u16,
491 iemAImpl_rcl_u32,
492 iemAImpl_rcl_u64
493};
494
495/** Function table for the RCR instruction. */
496static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
497{
498 iemAImpl_rcr_u8,
499 iemAImpl_rcr_u16,
500 iemAImpl_rcr_u32,
501 iemAImpl_rcr_u64
502};
503
504/** Function table for the SHL instruction. */
505static const IEMOPSHIFTSIZES g_iemAImpl_shl =
506{
507 iemAImpl_shl_u8,
508 iemAImpl_shl_u16,
509 iemAImpl_shl_u32,
510 iemAImpl_shl_u64
511};
512
513/** Function table for the SHR instruction. */
514static const IEMOPSHIFTSIZES g_iemAImpl_shr =
515{
516 iemAImpl_shr_u8,
517 iemAImpl_shr_u16,
518 iemAImpl_shr_u32,
519 iemAImpl_shr_u64
520};
521
522/** Function table for the SAR instruction. */
523static const IEMOPSHIFTSIZES g_iemAImpl_sar =
524{
525 iemAImpl_sar_u8,
526 iemAImpl_sar_u16,
527 iemAImpl_sar_u32,
528 iemAImpl_sar_u64
529};
530
531
532/** Function table for the MUL instruction. */
533static const IEMOPMULDIVSIZES g_iemAImpl_mul =
534{
535 iemAImpl_mul_u8,
536 iemAImpl_mul_u16,
537 iemAImpl_mul_u32,
538 iemAImpl_mul_u64
539};
540
541/** Function table for the IMUL instruction working implicitly on rAX. */
542static const IEMOPMULDIVSIZES g_iemAImpl_imul =
543{
544 iemAImpl_imul_u8,
545 iemAImpl_imul_u16,
546 iemAImpl_imul_u32,
547 iemAImpl_imul_u64
548};
549
550/** Function table for the DIV instruction. */
551static const IEMOPMULDIVSIZES g_iemAImpl_div =
552{
553 iemAImpl_div_u8,
554 iemAImpl_div_u16,
555 iemAImpl_div_u32,
556 iemAImpl_div_u64
557};
558
559/** Function table for the IDIV instruction. */
560static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
561{
562 iemAImpl_idiv_u8,
563 iemAImpl_idiv_u16,
564 iemAImpl_idiv_u32,
565 iemAImpl_idiv_u64
566};
567
568/** Function table for the SHLD instruction. */
569static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
570{
571 iemAImpl_shld_u16,
572 iemAImpl_shld_u32,
573 iemAImpl_shld_u64,
574};
575
576/** Function table for the SHRD instruction. */
577static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
578{
579 iemAImpl_shrd_u16,
580 iemAImpl_shrd_u32,
581 iemAImpl_shrd_u64,
582};
583
584
585/*******************************************************************************
586* Internal Functions *
587*******************************************************************************/
588static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
589static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
590static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
591static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
592static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
593static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
594static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
595static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
596static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
597static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
598static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
599static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
600static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
601static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
602static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
603static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
604static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
605static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
606static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
607
608#ifdef IEM_VERIFICATION_MODE
609static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
610#endif
611static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
612static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
613
614
615/**
616 * Initializes the decoder state.
617 *
618 * @param pIemCpu The per CPU IEM state.
619 */
620DECLINLINE(void) iemInitDecode(PIEMCPU pIemCpu)
621{
622 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
623
624 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
625 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
626 ? IEMMODE_64BIT
627 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
628 ? IEMMODE_32BIT
629 : IEMMODE_16BIT;
630 pIemCpu->enmCpuMode = enmMode;
631 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
632 pIemCpu->enmEffAddrMode = enmMode;
633 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
634 pIemCpu->enmEffOpSize = enmMode;
635 pIemCpu->fPrefixes = 0;
636 pIemCpu->uRexReg = 0;
637 pIemCpu->uRexB = 0;
638 pIemCpu->uRexIndex = 0;
639 pIemCpu->iEffSeg = X86_SREG_DS;
640 pIemCpu->offOpcode = 0;
641 pIemCpu->cbOpcode = 0;
642 pIemCpu->cActiveMappings = 0;
643 pIemCpu->iNextMapping = 0;
644}
645
646
647/**
648 * Prefetches the opcode bytes the first time execution is started.
649 *
650 * @returns Strict VBox status code.
651 * @param pIemCpu The IEM state.
652 */
653static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
654{
655#ifdef IEM_VERIFICATION_MODE
656 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
657#endif
658 iemInitDecode(pIemCpu);
659
660 /*
661 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
662 *
663 * First translate CS:rIP to a physical address.
664 */
665 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
666 uint32_t cbToTryRead;
667 RTGCPTR GCPtrPC;
668 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
669 {
670 cbToTryRead = PAGE_SIZE;
671 GCPtrPC = pCtx->rip;
672 if (!IEM_IS_CANONICAL(GCPtrPC))
673 return iemRaiseGeneralProtectionFault0(pIemCpu);
674 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
675 }
676 else
677 {
678 uint32_t GCPtrPC32 = pCtx->eip;
679 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
680 if (GCPtrPC32 > pCtx->csHid.u32Limit)
681 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
682 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
683 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
684 }
685
686 RTGCPHYS GCPhys;
687 uint64_t fFlags;
688 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
689 if (RT_FAILURE(rc))
690 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
691 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
692 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
693 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
694 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
695 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
696 /** @todo Check reserved bits and such stuff. PGM is better at doing
697 * that, so do it when implementing the guest virtual address
698 * TLB... */
699
700#ifdef IEM_VERIFICATION_MODE
701 /*
702 * Optimistic optimization: Use unconsumed opcode bytes from the previous
703 * instruction.
704 */
705 /** @todo optimize this differently by not using PGMPhysRead. */
706 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
707 pIemCpu->GCPhysOpcodes = GCPhys;
708 if ( offPrevOpcodes < cbOldOpcodes
709 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
710 {
711 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
712 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
713 pIemCpu->cbOpcode = cbNew;
714 return VINF_SUCCESS;
715 }
716#endif
717
718 /*
719 * Read the bytes at this address.
720 */
721 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
722 if (cbToTryRead > cbLeftOnPage)
723 cbToTryRead = cbLeftOnPage;
724 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
725 cbToTryRead = sizeof(pIemCpu->abOpcode);
726 /** @todo patch manager */
727 if (!pIemCpu->fByPassHandlers)
728 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
729 else
730 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
731 if (rc != VINF_SUCCESS)
732 return rc;
733 pIemCpu->cbOpcode = cbToTryRead;
734
735 return VINF_SUCCESS;
736}
737
738
739/**
740 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
741 * exception if it fails.
742 *
743 * @returns Strict VBox status code.
744 * @param pIemCpu The IEM state.
745 * @param cbMin The minimum number of additional opcode bytes to fetch.
746 */
747static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
748{
749 /*
750 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
751 *
752 * First translate CS:rIP to a physical address.
753 */
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
756 uint32_t cbToTryRead;
757 RTGCPTR GCPtrNext;
758 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
759 {
760 cbToTryRead = PAGE_SIZE;
761 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
762 if (!IEM_IS_CANONICAL(GCPtrNext))
763 return iemRaiseGeneralProtectionFault0(pIemCpu);
764 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
765 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
766 }
767 else
768 {
769 uint32_t GCPtrNext32 = pCtx->eip;
770 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
771 GCPtrNext32 += pIemCpu->cbOpcode;
772 if (GCPtrNext32 > pCtx->csHid.u32Limit)
773 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
774 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
775 if (cbToTryRead < cbMin - cbLeft)
776 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
777 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
778 }
779
780 RTGCPHYS GCPhys;
781 uint64_t fFlags;
782 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
783 if (RT_FAILURE(rc))
784 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
785 if ((fFlags & X86_PTE_US) && pIemCpu->uCpl == 2)
786 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
787 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
788 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
789 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
790 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
791 /** @todo Check reserved bits and such stuff. PGM is better at doing
792 * that, so do it when implementing the guest virtual address
793 * TLB... */
794
795 /*
796 * Read the bytes at this address.
797 */
798 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
799 if (cbToTryRead > cbLeftOnPage)
800 cbToTryRead = cbLeftOnPage;
801 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
802 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
803 Assert(cbToTryRead >= cbMin - cbLeft);
804 if (!pIemCpu->fByPassHandlers)
805 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
806 else
807 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
808 if (rc != VINF_SUCCESS)
809 return rc;
810 pIemCpu->cbOpcode += cbToTryRead;
811 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
812
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Deals with the problematic cases that iemOpcodeGetNextByte doesn't like.
819 *
820 * @returns Strict VBox status code.
821 * @param pIemCpu The IEM state.
822 * @param pb Where to return the opcode byte.
823 */
824DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextByteSlow(PIEMCPU pIemCpu, uint8_t *pb)
825{
826 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
827 if (rcStrict == VINF_SUCCESS)
828 {
829 uint8_t offOpcode = pIemCpu->offOpcode;
830 *pb = pIemCpu->abOpcode[offOpcode];
831 pIemCpu->offOpcode = offOpcode + 1;
832 }
833 else
834 *pb = 0;
835 return rcStrict;
836}
837
838
839/**
840 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
841 *
842 * @returns Strict VBox status code.
843 * @param pIemCpu The IEM state.
844 * @param pu16 Where to return the sign-extended opcode word.
845 */
846DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
847{
848 uint8_t u8;
849 VBOXSTRICTRC rcStrict = iemOpcodeGetNextByteSlow(pIemCpu, &u8);
850 if (rcStrict == VINF_SUCCESS)
851 *pu16 = (int8_t)u8;
852 return rcStrict;
853}
854
855
856/**
857 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
858 *
859 * @returns Strict VBox status code.
860 * @param pIemCpu The IEM state.
861 * @param pu16 Where to return the opcode word.
862 */
863DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
864{
865 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
866 if (rcStrict == VINF_SUCCESS)
867 {
868 uint8_t offOpcode = pIemCpu->offOpcode;
869 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
870 pIemCpu->offOpcode = offOpcode + 2;
871 }
872 else
873 *pu16 = 0;
874 return rcStrict;
875}
876
877
878/**
879 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
880 *
881 * @returns Strict VBox status code.
882 * @param pIemCpu The IEM state.
883 * @param pu32 Where to return the opcode dword.
884 */
885DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
886{
887 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
888 if (rcStrict == VINF_SUCCESS)
889 {
890 uint8_t offOpcode = pIemCpu->offOpcode;
891 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
892 pIemCpu->abOpcode[offOpcode + 1],
893 pIemCpu->abOpcode[offOpcode + 2],
894 pIemCpu->abOpcode[offOpcode + 3]);
895 pIemCpu->offOpcode = offOpcode + 4;
896 }
897 else
898 *pu32 = 0;
899 return rcStrict;
900}
901
902
903/**
904 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
905 *
906 * @returns Strict VBox status code.
907 * @param pIemCpu The IEM state.
908 * @param pu64 Where to return the opcode qword.
909 */
910DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
911{
912 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
913 if (rcStrict == VINF_SUCCESS)
914 {
915 uint8_t offOpcode = pIemCpu->offOpcode;
916 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
917 pIemCpu->abOpcode[offOpcode + 1],
918 pIemCpu->abOpcode[offOpcode + 2],
919 pIemCpu->abOpcode[offOpcode + 3]);
920 pIemCpu->offOpcode = offOpcode + 4;
921 }
922 else
923 *pu64 = 0;
924 return rcStrict;
925}
926
927
928/**
929 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
930 *
931 * @returns Strict VBox status code.
932 * @param pIemCpu The IEM state.
933 * @param pu64 Where to return the opcode qword.
934 */
935DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
936{
937 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
938 if (rcStrict == VINF_SUCCESS)
939 {
940 uint8_t offOpcode = pIemCpu->offOpcode;
941 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
942 pIemCpu->abOpcode[offOpcode + 1],
943 pIemCpu->abOpcode[offOpcode + 2],
944 pIemCpu->abOpcode[offOpcode + 3],
945 pIemCpu->abOpcode[offOpcode + 4],
946 pIemCpu->abOpcode[offOpcode + 5],
947 pIemCpu->abOpcode[offOpcode + 6],
948 pIemCpu->abOpcode[offOpcode + 7]);
949 pIemCpu->offOpcode = offOpcode + 8;
950 }
951 else
952 *pu64 = 0;
953 return rcStrict;
954}
955
956
957/**
958 * Fetches the next opcode byte.
959 *
960 * @returns Strict VBox status code.
961 * @param pIemCpu The IEM state.
962 * @param pu8 Where to return the opcode byte.
963 */
964DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
965{
966 uint8_t const offOpcode = pIemCpu->offOpcode;
967 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
968 return iemOpcodeGetNextByteSlow(pIemCpu, pu8);
969
970 *pu8 = pIemCpu->abOpcode[offOpcode];
971 pIemCpu->offOpcode = offOpcode + 1;
972 return VINF_SUCCESS;
973}
974
975/**
976 * Fetches the next opcode byte, returns automatically on failure.
977 *
978 * @param a_pu8 Where to return the opcode byte.
979 * @remark Implicitly references pIemCpu.
980 */
981#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
982 do \
983 { \
984 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
985 if (rcStrict2 != VINF_SUCCESS) \
986 return rcStrict2; \
987 } while (0)
988
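/* Usage sketch (illustrative): inside an FNIEMOP_DEF body the wrapper macro
 * reads one byte and bails out of the caller on any fetch problem:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);    - returns rcStrict2 from the caller
 *                                        if the fetch fails
 */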
989
990/**
991 * Fetches the next signed byte from the opcode stream.
992 *
993 * @returns Strict VBox status code.
994 * @param pIemCpu The IEM state.
995 * @param pi8 Where to return the signed byte.
996 */
997DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
998{
999 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1000}
1001
1002/**
1003 * Fetches the next signed byte from the opcode stream, returning automatically
1004 * on failure.
1005 *
1006 * @param a_pi8 Where to return the signed byte.
1007 * @remark Implicitly references pIemCpu.
1008 */
1009#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1010 do \
1011 { \
1012 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1013 if (rcStrict2 != VINF_SUCCESS) \
1014 return rcStrict2; \
1015 } while (0)
1016
1017
1018/**
1019 * Fetches the next signed byte from the opcode stream, sign extending it to a
1020 * 16-bit word.
1021 *
1022 * @returns Strict VBox status code.
1023 * @param pIemCpu The IEM state.
1024 * @param pu16 Where to return the unsigned word.
1025 */
1026DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1027{
1028 uint8_t const offOpcode = pIemCpu->offOpcode;
1029 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1030 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1031
1032 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1033 pIemCpu->offOpcode = offOpcode + 1;
1034 return VINF_SUCCESS;
1035}
1036
1037
1038/**
1039 * Fetches the next signed byte from the opcode stream and sign-extending it to
1040 * a word, returning automatically on failure.
1041 *
1042 * @param a_pu16 Where to return the word.
1043 * @remark Implicitly references pIemCpu.
1044 */
1045#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1046 do \
1047 { \
1048 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1049 if (rcStrict2 != VINF_SUCCESS) \
1050 return rcStrict2; \
1051 } while (0)
1052
1053
1054/**
1055 * Fetches the next opcode word.
1056 *
1057 * @returns Strict VBox status code.
1058 * @param pIemCpu The IEM state.
1059 * @param pu16 Where to return the opcode word.
1060 */
1061DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1062{
1063 uint8_t const offOpcode = pIemCpu->offOpcode;
1064 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1065 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1066
1067 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1068 pIemCpu->offOpcode = offOpcode + 2;
1069 return VINF_SUCCESS;
1070}
1071
1072/**
1073 * Fetches the next opcode word, returns automatically on failure.
1074 *
1075 * @param a_pu16 Where to return the opcode word.
1076 * @remark Implicitly references pIemCpu.
1077 */
1078#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1079 do \
1080 { \
1081 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1082 if (rcStrict2 != VINF_SUCCESS) \
1083 return rcStrict2; \
1084 } while (0)
1085
1086
1087/**
1088 * Fetches the next signed word from the opcode stream.
1089 *
1090 * @returns Strict VBox status code.
1091 * @param pIemCpu The IEM state.
1092 * @param pi16 Where to return the signed word.
1093 */
1094DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1095{
1096 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1097}
1098
1099/**
1100 * Fetches the next signed word from the opcode stream, returning automatically
1101 * on failure.
1102 *
1103 * @param a_pi16 Where to return the signed word.
1104 * @remark Implicitly references pIemCpu.
1105 */
1106#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1107 do \
1108 { \
1109 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1110 if (rcStrict2 != VINF_SUCCESS) \
1111 return rcStrict2; \
1112 } while (0)
1113
1114
1115/**
1116 * Fetches the next opcode dword.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pIemCpu The IEM state.
1120 * @param pu32 Where to return the opcode double word.
1121 */
1122DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1123{
1124 uint8_t const offOpcode = pIemCpu->offOpcode;
1125 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1126 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1127
1128 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1129 pIemCpu->abOpcode[offOpcode + 1],
1130 pIemCpu->abOpcode[offOpcode + 2],
1131 pIemCpu->abOpcode[offOpcode + 3]);
1132 pIemCpu->offOpcode = offOpcode + 4;
1133 return VINF_SUCCESS;
1134}
1135
1136/**
1137 * Fetches the next opcode dword, returns automatically on failure.
1138 *
1139 * @param a_pu32 Where to return the opcode dword.
1140 * @remark Implicitly references pIemCpu.
1141 */
1142#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1143 do \
1144 { \
1145 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1146 if (rcStrict2 != VINF_SUCCESS) \
1147 return rcStrict2; \
1148 } while (0)
1149
1150
1151/**
1152 * Fetches the next signed double word from the opcode stream.
1153 *
1154 * @returns Strict VBox status code.
1155 * @param pIemCpu The IEM state.
1156 * @param pi32 Where to return the signed double word.
1157 */
1158DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1159{
1160 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1161}
1162
1163/**
1164 * Fetches the next signed double word from the opcode stream, returning
1165 * automatically on failure.
1166 *
1167 * @param a_pi32 Where to return the signed double word.
1168 * @remark Implicitly references pIemCpu.
1169 */
1170#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1171 do \
1172 { \
1173 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1174 if (rcStrict2 != VINF_SUCCESS) \
1175 return rcStrict2; \
1176 } while (0)
1177
1178
1179/**
1180 * Fetches the next opcode dword, sign extending it into a quad word.
1181 *
1182 * @returns Strict VBox status code.
1183 * @param pIemCpu The IEM state.
1184 * @param pu64 Where to return the opcode quad word.
1185 */
1186DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1187{
1188 uint8_t const offOpcode = pIemCpu->offOpcode;
1189 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1190 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1191
1192 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1193 pIemCpu->abOpcode[offOpcode + 1],
1194 pIemCpu->abOpcode[offOpcode + 2],
1195 pIemCpu->abOpcode[offOpcode + 3]);
1196 *pu64 = i32;
1197 pIemCpu->offOpcode = offOpcode + 4;
1198 return VINF_SUCCESS;
1199}
1200
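/* Worked example for the sign extension above: an encoded 0xfffffffe (-2)
 * becomes 0xfffffffffffffffe in the 64-bit result, while 0x7fffffff stays
 * 0x000000007fffffff. */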
1201/**
1202 * Fetches the next opcode double word and sign extends it to a quad word,
1203 * returns automatically on failure.
1204 *
1205 * @param a_pu64 Where to return the opcode quad word.
1206 * @remark Implicitly references pIemCpu.
1207 */
1208#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1209 do \
1210 { \
1211 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1212 if (rcStrict2 != VINF_SUCCESS) \
1213 return rcStrict2; \
1214 } while (0)
1215
1216
1217/**
1218 * Fetches the next opcode qword.
1219 *
1220 * @returns Strict VBox status code.
1221 * @param pIemCpu The IEM state.
1222 * @param pu64 Where to return the opcode qword.
1223 */
1224DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1225{
1226 uint8_t const offOpcode = pIemCpu->offOpcode;
1227 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1228 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1229
1230 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1231 pIemCpu->abOpcode[offOpcode + 1],
1232 pIemCpu->abOpcode[offOpcode + 2],
1233 pIemCpu->abOpcode[offOpcode + 3],
1234 pIemCpu->abOpcode[offOpcode + 4],
1235 pIemCpu->abOpcode[offOpcode + 5],
1236 pIemCpu->abOpcode[offOpcode + 6],
1237 pIemCpu->abOpcode[offOpcode + 7]);
1238 pIemCpu->offOpcode = offOpcode + 8;
1239 return VINF_SUCCESS;
1240}
1241
1242/**
1243 * Fetches the next opcode quad word, returns automatically on failure.
1244 *
1245 * @param a_pu64 Where to return the opcode quad word.
1246 * @remark Implicitly references pIemCpu.
1247 */
1248#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1249 do \
1250 { \
1251 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1252 if (rcStrict2 != VINF_SUCCESS) \
1253 return rcStrict2; \
1254 } while (0)
1255
1256
1257/** @name Misc Worker Functions.
1258 * @{
1259 */
1260
1261
1262/**
1263 * Validates a new SS segment.
1264 *
1265 * @returns VBox strict status code.
1266 * @param pIemCpu The IEM per CPU instance data.
1267 * @param pCtx The CPU context.
1268 * @param NewSS The new SS selector.
1269 * @param uCpl The CPL to load the stack for.
1270 * @param pDesc Where to return the descriptor.
1271 */
1272static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1273{
1274 /* Null selectors are not allowed (we're not called for dispatching
1275 interrupts with SS=0 in long mode). */
1276 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1277 {
1278 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1279 return iemRaiseGeneralProtectionFault0(pIemCpu);
1280 }
1281
1282 /*
1283 * Read the descriptor.
1284 */
1285 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1286 if (rcStrict != VINF_SUCCESS)
1287 return rcStrict;
1288
1289 /*
1290 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1291 */
1292 if (!pDesc->Legacy.Gen.u1DescType)
1293 {
1294 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1295 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1296 }
1297
1298 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1299 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1300 {
1301 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1302 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1303 }
1310 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1311 if ((NewSS & X86_SEL_RPL) != uCpl)
1312 {
1313 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1314 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1315 }
1316 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1317 {
1318 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1319 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1320 }
1321
1322 /* Is it there? */
1323 /** @todo testcase: Is this checked before the canonical / limit check below? */
1324 if (!pDesc->Legacy.Gen.u1Present)
1325 {
1326 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1327 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1328 }
1329
1330 return VINF_SUCCESS;
1331}
1332
1333
1334/** @} */
1335
1336/** @name Raising Exceptions.
1337 *
1338 * @{
1339 */
1340
1341/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1342 * @{ */
1343/** CPU exception. */
1344#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1345/** External interrupt (from PIC, APIC, whatever). */
1346#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1347/** Software interrupt (int, into or bound). */
1348#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1349/** Takes an error code. */
1350#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1351/** Takes a CR2. */
1352#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1353/** Generated by the breakpoint instruction. */
1354#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1355/** Mask out the nesting level. */
1356#define IEM_XCPT_FLAGS_NESTING_MASK UINT32_C(0xff000000)
1357/** Shift count for the nesting level. */
1358#define IEM_XCPT_FLAGS_NESTING_SHIFT 24
1359/** Mask out the nesting level after shifting. */
1360#define IEM_XCPT_FLAGS_NESTING_SMASK UINT32_C(0x000000ff)
1361/** @} */
1362
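/* Hedged example of how the flags combine: a fault that pushes an error code
 * would be raised with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR plus the
 * vector and error value, and a page fault would additionally pass
 * IEM_XCPT_FLAGS_CR2 together with the faulting address. */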
1363/**
1364 * Loads the specified stack far pointer from the TSS.
1365 *
1366 * @returns VBox strict status code.
1367 * @param pIemCpu The IEM per CPU instance data.
1368 * @param pCtx The CPU context.
1369 * @param uCpl The CPL to load the stack for.
1370 * @param pSelSS Where to return the new stack segment.
1371 * @param puEsp Where to return the new stack pointer.
1372 */
1373static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1374 PRTSEL pSelSS, uint32_t *puEsp)
1375{
1376 VBOXSTRICTRC rcStrict;
1377 Assert(uCpl < 4);
1378 *puEsp = 0; /* make gcc happy */
1379 *pSelSS = 0; /* make gcc happy */
1380
1381 switch (pCtx->trHid.Attr.n.u4Type)
1382 {
1383 /*
1384 * 16-bit TSS (X86TSS16).
1385 */
1386 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1387 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1388 {
1389 uint32_t off = uCpl * 4 + 2;
1390 if (off + 4 > pCtx->trHid.u32Limit)
1391 {
1392 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
1393 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1394 }
1395
1396 uint32_t u32Tmp;
1397 rcStrict = iemMemFetchDataU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1398 if (rcStrict == VINF_SUCCESS)
1399 {
1400 *puEsp = RT_LOWORD(u32Tmp);
1401 *pSelSS = RT_HIWORD(u32Tmp);
1402 return VINF_SUCCESS;
1403 }
1404 break;
1405 }
1406
1407 /*
1408 * 32-bit TSS (X86TSS32).
1409 */
1410 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1411 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1412 {
1413 uint32_t off = uCpl * 8 + 4;
1414 if (off + 7 > pCtx->trHid.u32Limit)
1415 {
1416 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->trHid.u32Limit));
1417 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1418 }
1419
1420 uint64_t u64Tmp;
1421 rcStrict = iemMemFetchDataU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1422 if (rcStrict == VINF_SUCCESS)
1423 {
1424 *puEsp = u64Tmp & UINT32_MAX;
1425 *pSelSS = (RTSEL)(u64Tmp >> 32);
1426 return VINF_SUCCESS;
1427 }
1428 break;
1429 }
1430
1431 default:
1432 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1433 }
1434 return rcStrict;
1435}
1436
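/* Worked example for the function above: in a 32-bit TSS the CPL-n stack pair
 * lives at offset 4 + n*8 (ESPn first, then SSn), so for uCpl=1 the code reads
 * the eight bytes at TSS base + 12 and splits them into *puEsp and *pSelSS;
 * the 16-bit TSS keeps its SP:SS pairs at offset 2 + n*4 instead. */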
1437
1438/**
1439 * Adjust the CPU state according to the exception being raised.
1440 *
1441 * @param pCtx The CPU context.
1442 * @param u8Vector The exception that has been raised.
1443 */
1444DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1445{
1446 switch (u8Vector)
1447 {
1448 case X86_XCPT_DB:
1449 pCtx->dr[7] &= ~X86_DR7_GD;
1450 break;
1451 /** @todo Read the AMD and Intel exception reference... */
1452 }
1453}
1454
1455
1456/**
1457 * Implements exceptions and interrupts for real mode.
1458 *
1459 * @returns VBox strict status code.
1460 * @param pIemCpu The IEM per CPU instance data.
1461 * @param pCtx The CPU context.
1462 * @param cbInstr The number of bytes to offset rIP by in the return
1463 * address.
1464 * @param u8Vector The interrupt / exception vector number.
1465 * @param fFlags The flags.
1466 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1467 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1468 */
1469static VBOXSTRICTRC
1470iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1471 PCPUMCTX pCtx,
1472 uint8_t cbInstr,
1473 uint8_t u8Vector,
1474 uint32_t fFlags,
1475 uint16_t uErr,
1476 uint64_t uCr2)
1477{
1478 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1479
1480 /*
1481 * Read the IDT entry.
1482 */
1483 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1484 {
1485 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1486 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1487 }
1488 RTFAR16 Idte;
1489 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1490 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1491 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1492 return rcStrict;
1493
1494 /*
1495 * Push the stack frame.
1496 */
1497 uint16_t *pu16Frame;
1498 uint64_t uNewRsp;
1499 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1500 if (rcStrict != VINF_SUCCESS)
1501 return rcStrict;
1502
1503 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1504 pu16Frame[1] = (uint16_t)pCtx->cs;
1505 pu16Frame[0] = pCtx->ip + cbInstr;
1506 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1507 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1508 return rcStrict;
1509
1510 /*
1511 * Load the vector address into cs:ip and make exception specific state
1512 * adjustments.
1513 */
1514 pCtx->cs = Idte.sel;
1515 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1516 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1517 pCtx->rip = Idte.off;
1518 pCtx->eflags.Bits.u1IF = 0;
1519
1520 /** @todo do we actually do this in real mode? */
1521 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1522 iemRaiseXcptAdjustState(pCtx, u8Vector);
1523
1524 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1525}
1526
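/* Worked example for the real-mode path above: the IDT is the classic IVT with
 * four bytes per vector (16-bit offset, then 16-bit segment), so for INT 0x21
 * with a zero-based IDTR the far pointer is read from linear address 0x84, the
 * caller's FLAGS/CS/IP are pushed, and the handler starts with IF cleared. */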
1527
1528/**
1529 * Implements exceptions and interrupts for protected mode.
1530 *
1531 * @returns VBox strict status code.
1532 * @param pIemCpu The IEM per CPU instance data.
1533 * @param pCtx The CPU context.
1534 * @param cbInstr The number of bytes to offset rIP by in the return
1535 * address.
1536 * @param u8Vector The interrupt / exception vector number.
1537 * @param fFlags The flags.
1538 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1539 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1540 */
1541static VBOXSTRICTRC
1542iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1543 PCPUMCTX pCtx,
1544 uint8_t cbInstr,
1545 uint8_t u8Vector,
1546 uint32_t fFlags,
1547 uint16_t uErr,
1548 uint64_t uCr2)
1549{
1550 /*
1551 * Read the IDT entry.
1552 */
1553 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1554 {
1555 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1556 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1557 }
1558 X86DESC Idte;
1559 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &Idte.u, UINT8_MAX,
1560 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1561 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1562 return rcStrict;
1563
1564 /*
1565 * Check the descriptor type, DPL and such.
1566 * ASSUMES this is done in the same order as described for call-gate calls.
1567 */
1568 if (Idte.Gate.u1DescType)
1569 {
1570 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1571 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1572 }
1573 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1574 switch (Idte.Gate.u4Type)
1575 {
1576 case X86_SEL_TYPE_SYS_UNDEFINED:
1577 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1578 case X86_SEL_TYPE_SYS_LDT:
1579 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1580 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1581 case X86_SEL_TYPE_SYS_UNDEFINED2:
1582 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1583 case X86_SEL_TYPE_SYS_UNDEFINED3:
1584 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1585 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1586 case X86_SEL_TYPE_SYS_UNDEFINED4:
1587 {
1588 /** @todo check what actually happens when the type is wrong...
1589 * esp. call gates. */
1590 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1591 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1592 }
1593
1594 case X86_SEL_TYPE_SYS_286_INT_GATE:
1595 case X86_SEL_TYPE_SYS_386_INT_GATE:
1596 fEflToClear |= X86_EFL_IF;
1597 break;
1598
1599 case X86_SEL_TYPE_SYS_TASK_GATE:
1600 /** @todo task gates. */
1601 AssertFailedReturn(VERR_NOT_SUPPORTED);
1602
1603 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1604 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1605 break;
1606
1607 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1608 }
1609
1610 /* Check DPL against CPL if applicable. */
1611 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1612 {
1613 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1614 {
1615 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1616 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1617 }
1618 }
1619
1620 /* Is it there? */
1621 if (!Idte.Gate.u1Present)
1622 {
1623 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1624 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1625 }
1626
1627 /* A null CS is bad. */
1628 RTSEL NewCS = Idte.Gate.u16Sel;
1629 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1630 {
1631 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1632 return iemRaiseGeneralProtectionFault0(pIemCpu);
1633 }
1634
1635 /* Fetch the descriptor for the new CS. */
1636 IEMSELDESC DescCS;
1637 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1638 if (rcStrict != VINF_SUCCESS)
1639 return rcStrict;
1640
1641 /* Must be a code segment. */
1642 if (!DescCS.Legacy.Gen.u1DescType)
1643 {
1644 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1645 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1646 }
1647 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1648 {
1649 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1650 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1651 }
1652
1653 /* Don't allow lowering the privilege level. */
1654 /** @todo Does the lowering of privileges apply to software interrupts
1655 * only? This has bearings on the more-privileged or
1656 * same-privilege stack behavior further down. A testcase would
1657 * be nice. */
1658 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1659 {
1660 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1661 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1662 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1663 }
1664 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1665
1666 /* Check the new EIP against the new CS limit. */
1667 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1668 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1669 ? Idte.Gate.u16OffsetLow
1670 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1671 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1672 if (DescCS.Legacy.Gen.u1Granularity)
1673 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1674 if (uNewEip > cbLimitCS)
1675 {
1676 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - EIP=%#x is past the limit (%#x) -> #GP\n",
1677 u8Vector, NewCS, uNewEip, cbLimitCS));
1678 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1679 }
1680
1681 /* Make sure the selector is present. */
1682 if (!DescCS.Legacy.Gen.u1Present)
1683 {
1684 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1685 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1686 }
1687
1688 /*
1689 * If the privilege level changes, we need to get a new stack from the TSS.
1690 * This in turn means validating the new SS and ESP...
1691 */
1692 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1693 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1694 if (uNewCpl != pIemCpu->uCpl)
1695 {
1696 RTSEL NewSS;
1697 uint32_t uNewEsp;
1698 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1699 if (rcStrict != VINF_SUCCESS)
1700 return rcStrict;
1701
1702 IEMSELDESC DescSS;
1703 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1704 if (rcStrict != VINF_SUCCESS)
1705 return rcStrict;
1706
1707 /* Check that there is sufficient space for the stack frame. */
1708 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1709 if (DescSS.Legacy.Gen.u1Granularity)
1710 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1711 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_NOT_IMPLEMENTED);
1712
1713 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1714 if ( uNewEsp - 1 > cbLimitSS
1715 || uNewEsp < cbStackFrame)
1716 {
1717 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1718 u8Vector, NewSS, uNewEsp, cbStackFrame));
1719 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1720 }
1721
1722 /*
1723 * Start making changes.
1724 */
1725
1726 /* Create the stack frame. */
1727 RTPTRUNION uStackFrame;
1728 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1729 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W);
1730 if (rcStrict != VINF_SUCCESS)
1731 return rcStrict;
1732 void * const pvStackFrame = uStackFrame.pv;
1733
1734 if (fFlags & IEM_XCPT_FLAGS_ERR)
1735 *uStackFrame.pu32++ = uErr;
1736 uStackFrame.pu32[0] = pCtx->eip;
1737 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1738 uStackFrame.pu32[2] = pCtx->eflags.u;
1739 uStackFrame.pu32[3] = pCtx->esp;
1740 uStackFrame.pu32[4] = pCtx->ss;
1741 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W);
1742 if (rcStrict != VINF_SUCCESS)
1743 return rcStrict;
1744
1745 /* Mark the selectors 'accessed' (hope this is the correct time). */
1746 /** @todo testcase: exactly _when_ are the accessed bits set - before or
1747 * after pushing the stack frame? (Write protect the gdt + stack to
1748 * find out.) */
1749 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1750 {
1751 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1752 if (rcStrict != VINF_SUCCESS)
1753 return rcStrict;
1754 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1755 }
1756
1757 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1758 {
1759 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1760 if (rcStrict != VINF_SUCCESS)
1761 return rcStrict;
1762 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1763 }
1764
1765 /*
1766 * Start committing the register changes (joins with the DPL=CPL branch).
1767 */
1768 pCtx->ss = NewSS;
1769 pCtx->ssHid.u32Limit = cbLimitSS;
1770 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
1771 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1772 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1773 pIemCpu->uCpl = uNewCpl;
1774 }
1775 /*
1776 * Same privilege, no stack change and smaller stack frame.
1777 */
1778 else
1779 {
1780 uint64_t uNewRsp;
1781 RTPTRUNION uStackFrame;
1782 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
1783 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
1784 if (rcStrict != VINF_SUCCESS)
1785 return rcStrict;
1786 void * const pvStackFrame = uStackFrame.pv;
1787
1788 if (fFlags & IEM_XCPT_FLAGS_ERR)
1789 *uStackFrame.pu32++ = uErr;
1790 uStackFrame.pu32[0] = pCtx->eip;
1791 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1792 uStackFrame.pu32[2] = pCtx->eflags.u;
1793 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
1794 if (rcStrict != VINF_SUCCESS)
1795 return rcStrict;
1796
1797 /* Mark the CS selector as 'accessed'. */
1798 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1799 {
1800 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1801 if (rcStrict != VINF_SUCCESS)
1802 return rcStrict;
1803 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1804 }
1805
1806 /*
1807 * Start committing the register changes (joins with the other branch).
1808 */
1809 pCtx->rsp = uNewRsp;
1810 }
1811
1812 /* ... register committing continues. */
1813 pCtx->cs = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1814 pCtx->csHid.u32Limit = cbLimitCS;
1815 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
1816 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
1817
1818 pCtx->rip = uNewEip;
1819 pCtx->rflags.u &= ~fEflToClear;
1820
1821 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1822}
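/* Informal sketch of the 32-bit stack frames built above (not additional
 * logic, just what the pushes amount to). With a privilege change the new
 * ESP points at, from low to high: [error code if any], return EIP, return
 * CS, EFLAGS, old ESP, old SS (20 or 24 bytes). Without a privilege change
 * only [error code if any], return EIP, return CS and EFLAGS are pushed
 * (12 or 16 bytes), matching the cbStackFrame values used above. */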
1823
1824
1825/**
1826 * Implements exceptions and interrupts for V8086 mode.
1827 *
1828 * @returns VBox strict status code.
1829 * @param pIemCpu The IEM per CPU instance data.
1830 * @param pCtx The CPU context.
1831 * @param cbInstr The number of bytes to offset rIP by in the return
1832 * address.
1833 * @param u8Vector The interrupt / exception vector number.
1834 * @param fFlags The flags.
1835 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1836 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1837 */
1838static VBOXSTRICTRC
1839iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
1840 PCPUMCTX pCtx,
1841 uint8_t cbInstr,
1842 uint8_t u8Vector,
1843 uint32_t fFlags,
1844 uint16_t uErr,
1845 uint64_t uCr2)
1846{
1847 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
1848 return VERR_NOT_IMPLEMENTED;
1849}
1850
1851
1852/**
1853 * Implements exceptions and interrupts for long mode.
1854 *
1855 * @returns VBox strict status code.
1856 * @param pIemCpu The IEM per CPU instance data.
1857 * @param pCtx The CPU context.
1858 * @param cbInstr The number of bytes to offset rIP by in the return
1859 * address.
1860 * @param u8Vector The interrupt / exception vector number.
1861 * @param fFlags The flags.
1862 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1863 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1864 */
1865static VBOXSTRICTRC
1866iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
1867 PCPUMCTX pCtx,
1868 uint8_t cbInstr,
1869 uint8_t u8Vector,
1870 uint32_t fFlags,
1871 uint16_t uErr,
1872 uint64_t uCr2)
1873{
1874 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
1875 return VERR_NOT_IMPLEMENTED;
1876}
1877
1878
1879/**
1880 * Implements exceptions and interrupts.
1881 *
1882 * All exceptions and interrupts go through this function!
1883 *
1884 * @returns VBox strict status code.
1885 * @param pIemCpu The IEM per CPU instance data.
1886 * @param cbInstr The number of bytes to offset rIP by in the return
1887 * address.
1888 * @param u8Vector The interrupt / exception vector number.
1889 * @param fFlags The flags.
1890 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1891 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1892 */
1893static VBOXSTRICTRC
1894iemRaiseXcptOrInt(PIEMCPU pIemCpu,
1895 uint8_t cbInstr,
1896 uint8_t u8Vector,
1897 uint32_t fFlags,
1898 uint16_t uErr,
1899 uint64_t uCr2)
1900{
1901 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1902
1903 /*
1904 * Do recursion accounting.
1905 */
1906 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
1907 if (pIemCpu->cXcptRecursions == 0)
1908 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
1909 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
1910 else
1911 {
1912 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d\n",
1913 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1));
1914
1915 /** @todo double and triple faults. */
1916 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_NOT_IMPLEMENTED);
1917 }
1918 pIemCpu->cXcptRecursions++;
1919 pIemCpu->uCurXcpt = u8Vector;
1920
1921 /*
1922 * Call the mode specific worker function.
1923 */
1924 VBOXSTRICTRC rcStrict;
1925 if (!(pCtx->cr0 & X86_CR0_PE))
1926 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1927 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1928 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1929 else if (!pCtx->eflags.Bits.u1VM)
1930 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1931 else
1932 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
1933
1934 /*
1935 * Unwind.
1936 */
1937 pIemCpu->cXcptRecursions--;
1938 pIemCpu->uCurXcpt = uPrevXcpt;
1939 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
1940 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
1941 return rcStrict;
1942}
1943
1944
1945/** \#DE - 00. */
1946static VBOXSTRICTRC iemRaiseDivideError(PIEMCPU pIemCpu)
1947{
1948 AssertFailed(/** @todo implement this */);
1949 return VERR_NOT_IMPLEMENTED;
1950}
1951
1952
1953/** \#DB - 01. */
1954static VBOXSTRICTRC iemRaiseDebugException(PIEMCPU pIemCpu)
1955{
1956 AssertFailed(/** @todo implement this */);
1957 return VERR_NOT_IMPLEMENTED;
1958}
1959
1960
1961/** \#UD - 06. */
1962static VBOXSTRICTRC iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
1963{
1964 AssertFailed(/** @todo implement X86_XCPT_UD */);
1965 return VERR_NOT_IMPLEMENTED;
1966}
1967
1968
1969/** \#NM - 07. */
1970static VBOXSTRICTRC iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
1971{
1972 AssertFailed(/** @todo implement this */);
1973 return VERR_NOT_IMPLEMENTED;
1974}
1975
1976
1977/** \#TS(err) - 0a. */
1978static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
1979{
1980 AssertFailed(/** @todo implement this */);
1981 return VERR_NOT_IMPLEMENTED;
1982}
1983
1984
1985/** \#TS(tr) - 0a. */
1986static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
1987{
1988 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, pIemCpu->CTX_SUFF(pCtx)->tr);
1989}
1990
1991
1992/** \#NP(err) - 0b. */
1993static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
1994{
1995 AssertFailed(/** @todo implement this */);
1996 return VERR_NOT_IMPLEMENTED;
1997}
1998
1999
2000/** \#NP(seg) - 0b. */
2001static VBOXSTRICTRC iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2002{
2003 AssertFailed(/** @todo implement this */);
2004 return VERR_NOT_IMPLEMENTED;
2005}
2006
2007
2008/** \#NP(sel) - 0b. */
2009static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2010{
2011 AssertFailed(/** @todo implement this */);
2012 return VERR_NOT_IMPLEMENTED;
2013}
2014
2015
2016/** \#GP(n) - 0d. */
2017static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2018{
2019 AssertFailed(/** @todo implement this */);
2020 return VERR_NOT_IMPLEMENTED;
2021}
2022
2023
2024/** \#GP(0) - 0d. */
2025static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2026{
2027 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2028}
2029
2030
2031/** \#GP(sel) - 0d. */
2032static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2033{
2034 return iemRaiseGeneralProtectionFault(pIemCpu, Sel & (X86_SEL_MASK | X86_SEL_LDT));
2035}
2036
2037
2038/** \#GP(0) - 0d. */
2039static VBOXSTRICTRC iemRaiseNotCanonical(PIEMCPU pIemCpu)
2040{
2041 AssertFailed(/** @todo implement this */);
2042 return VERR_NOT_IMPLEMENTED;
2043}
2044
2045
2046/** \#GP(sel) - 0d. */
2047static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2048{
2049 AssertFailed(/** @todo implement this */);
2050 return VERR_NOT_IMPLEMENTED;
2051}
2052
2053
2054/** \#GP(sel) - 0d. */
2055static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2056{
2057 AssertFailed(/** @todo implement this */);
2058 return VERR_NOT_IMPLEMENTED;
2059}
2060
2061
2062/** \#GP(sel) - 0d. */
2063static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2064{
2065 AssertFailed(/** @todo implement this */);
2066 return VERR_NOT_IMPLEMENTED;
2067}
2068
2069
2070/** \#PF(n) - 0e. */
2071static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2072{
2073 /** @todo implement this */
2074 AssertMsgFailed(("GCPtrWhere=%RGp fAccess=%#x rc=%Rrc\n", GCPtrWhere, fAccess, rc));
2075 return VERR_NOT_IMPLEMENTED;
2076}
2077
2078
2079/** \#MF(n) - 10. */
2080static VBOXSTRICTRC iemRaiseMathFault(PIEMCPU pIemCpu)
2081{
2082 AssertFailed(/** @todo implement this */);
2083 return VERR_NOT_IMPLEMENTED;
2084}
2085
2086
2087
2088/**
2089 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2090 *
2091 * This enables us to add/remove arguments and force different levels of
2092 * inlining as we wish.
2093 *
2094 * @return Strict VBox status code.
2095 */
2096#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2097IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2098{
2099 AssertFailed();
2100 return VERR_NOT_IMPLEMENTED;
2101}
2102
2103
2104/**
2105 * Macro for calling iemCImplRaiseInvalidOpcode().
2106 *
2107 * This enables us to add/remove arguments and force different levels of
2108 * inlining as we wish.
2109 *
2110 * @return Strict VBox status code.
2111 */
2112#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2113IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2114{
2115 AssertFailed();
2116 return VERR_NOT_IMPLEMENTED;
2117}
2118
2119
2120/** @} */
2121
2122
2123/*
2124 *
2125 * Helper routines.
2126 * Helper routines.
2127 * Helper routines.
2128 *
2129 */
2130
2131/**
2132 * Recalculates the effective operand size.
2133 *
2134 * @param pIemCpu The IEM state.
2135 */
2136static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2137{
2138 switch (pIemCpu->enmCpuMode)
2139 {
2140 case IEMMODE_16BIT:
2141 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2142 break;
2143 case IEMMODE_32BIT:
2144 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2145 break;
2146 case IEMMODE_64BIT:
2147 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2148 {
2149 case 0:
2150 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2151 break;
2152 case IEM_OP_PRF_SIZE_OP:
2153 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2154 break;
2155 case IEM_OP_PRF_SIZE_REX_W:
2156 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2157 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2158 break;
2159 }
2160 break;
2161 default:
2162 AssertFailed();
2163 }
2164}
2165
2166
2167/**
2168 * Sets the default operand size to 64-bit and recalculates the effective
2169 * operand size.
2170 *
2171 * @param pIemCpu The IEM state.
2172 */
2173static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2174{
2175 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2176 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2177 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2178 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2179 else
2180 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2181}
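/* Informal summary of the operand size rules implemented above (standard
 * x86 behaviour, listed for orientation only):
 *   - 16-bit code: no prefix -> 16-bit, 66h -> 32-bit.
 *   - 32-bit code: no prefix -> 32-bit, 66h -> 16-bit.
 *   - 64-bit code: no prefix -> the default (usually 32-bit), 66h -> 16-bit,
 *     REX.W -> 64-bit and it wins over 66h.
 *   - Instructions with a 64-bit default (near branches, push/pop) stay
 *     64-bit unless 66h is used without REX.W (iemRecalEffOpSize64Default). */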
2182
2183
2184/*
2185 *
2186 * Common opcode decoders.
2187 * Common opcode decoders.
2188 * Common opcode decoders.
2189 *
2190 */
2191#include <iprt/mem.h>
2192
2193/**
2194 * Used to add extra details about a stub case.
2195 * @param pIemCpu The IEM per CPU state.
2196 */
2197static void iemOpStubMsg2(PIEMCPU pIemCpu)
2198{
2199 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2200 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2201 char szRegs[4096];
2202 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2203 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2204 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2205 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2206 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2207 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2208 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2209 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2210 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2211 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2212 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2213 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2214 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2215 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2216 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2217 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2218 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2219 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2220 " efer=%016VR{efer}\n"
2221 " pat=%016VR{pat}\n"
2222 " sf_mask=%016VR{sf_mask}\n"
2223 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2224 " lstar=%016VR{lstar}\n"
2225 " star=%016VR{star} cstar=%016VR{cstar}\n"
2226 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2227 );
2228
2229 char szInstr[256];
2230 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2231 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2232 szInstr, sizeof(szInstr), NULL);
2233
2234 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2235}
2236
2237
2238/** Stubs an opcode. */
2239#define FNIEMOP_STUB(a_Name) \
2240 FNIEMOP_DEF(a_Name) \
2241 { \
2242 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2243 iemOpStubMsg2(pIemCpu); \
2244 RTAssertPanic(); \
2245 return VERR_NOT_IMPLEMENTED; \
2246 } \
2247 typedef int ignore_semicolon
2248
2249/** Stubs an opcode. */
2250#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2251 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2252 { \
2253 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2254 iemOpStubMsg2(pIemCpu); \
2255 RTAssertPanic(); \
2256 return VERR_NOT_IMPLEMENTED; \
2257 } \
2258 typedef int ignore_semicolon
2259
2260
2261
2262/** @name Register Access.
2263 * @{
2264 */
2265
2266/**
2267 * Gets a reference (pointer) to the specified hidden segment register.
2268 *
2269 * @returns Hidden register reference.
2270 * @param pIemCpu The per CPU data.
2271 * @param iSegReg The segment register.
2272 */
2273static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2274{
2275 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2276 switch (iSegReg)
2277 {
2278 case X86_SREG_ES: return &pCtx->esHid;
2279 case X86_SREG_CS: return &pCtx->csHid;
2280 case X86_SREG_SS: return &pCtx->ssHid;
2281 case X86_SREG_DS: return &pCtx->dsHid;
2282 case X86_SREG_FS: return &pCtx->fsHid;
2283 case X86_SREG_GS: return &pCtx->gsHid;
2284 }
2285 AssertFailedReturn(NULL);
2286}
2287
2288
2289/**
2290 * Gets a reference (pointer) to the specified segment register (the selector
2291 * value).
2292 *
2293 * @returns Pointer to the selector variable.
2294 * @param pIemCpu The per CPU data.
2295 * @param iSegReg The segment register.
2296 */
2297static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2298{
2299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2300 switch (iSegReg)
2301 {
2302 case X86_SREG_ES: return &pCtx->es;
2303 case X86_SREG_CS: return &pCtx->cs;
2304 case X86_SREG_SS: return &pCtx->ss;
2305 case X86_SREG_DS: return &pCtx->ds;
2306 case X86_SREG_FS: return &pCtx->fs;
2307 case X86_SREG_GS: return &pCtx->gs;
2308 }
2309 AssertFailedReturn(NULL);
2310}
2311
2312
2313/**
2314 * Fetches the selector value of a segment register.
2315 *
2316 * @returns The selector value.
2317 * @param pIemCpu The per CPU data.
2318 * @param iSegReg The segment register.
2319 */
2320static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2321{
2322 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2323 switch (iSegReg)
2324 {
2325 case X86_SREG_ES: return pCtx->es;
2326 case X86_SREG_CS: return pCtx->cs;
2327 case X86_SREG_SS: return pCtx->ss;
2328 case X86_SREG_DS: return pCtx->ds;
2329 case X86_SREG_FS: return pCtx->fs;
2330 case X86_SREG_GS: return pCtx->gs;
2331 }
2332 AssertFailedReturn(0xffff);
2333}
2334
2335
2336/**
2337 * Gets a reference (pointer) to the specified general register.
2338 *
2339 * @returns Register reference.
2340 * @param pIemCpu The per CPU data.
2341 * @param iReg The general register.
2342 */
2343static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2344{
2345 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2346 switch (iReg)
2347 {
2348 case X86_GREG_xAX: return &pCtx->rax;
2349 case X86_GREG_xCX: return &pCtx->rcx;
2350 case X86_GREG_xDX: return &pCtx->rdx;
2351 case X86_GREG_xBX: return &pCtx->rbx;
2352 case X86_GREG_xSP: return &pCtx->rsp;
2353 case X86_GREG_xBP: return &pCtx->rbp;
2354 case X86_GREG_xSI: return &pCtx->rsi;
2355 case X86_GREG_xDI: return &pCtx->rdi;
2356 case X86_GREG_x8: return &pCtx->r8;
2357 case X86_GREG_x9: return &pCtx->r9;
2358 case X86_GREG_x10: return &pCtx->r10;
2359 case X86_GREG_x11: return &pCtx->r11;
2360 case X86_GREG_x12: return &pCtx->r12;
2361 case X86_GREG_x13: return &pCtx->r13;
2362 case X86_GREG_x14: return &pCtx->r14;
2363 case X86_GREG_x15: return &pCtx->r15;
2364 }
2365 AssertFailedReturn(NULL);
2366}
2367
2368
2369/**
2370 * Gets a reference (pointer) to the specified 8-bit general register.
2371 *
2372 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2373 *
2374 * @returns Register reference.
2375 * @param pIemCpu The per CPU data.
2376 * @param iReg The register.
2377 */
2378static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2379{
2380 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2381 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2382
2383 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2384 if (iReg >= 4)
2385 pu8Reg++;
2386 return pu8Reg;
2387}
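/* Example of the 8-bit register encoding handled above: without a REX prefix
 * indices 0-7 select AL, CL, DL, BL, AH, CH, DH, BH, so 4-7 address the high
 * byte of the first four GPRs (hence the iReg & 3 and the pu8Reg++). With
 * any REX prefix 4-7 select SPL, BPL, SIL, DIL and 8-15 select R8B-R15B,
 * which are plain low bytes, so iemGRegRef can be used directly. */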
2388
2389
2390/**
2391 * Fetches the value of an 8-bit general register.
2392 *
2393 * @returns The register value.
2394 * @param pIemCpu The per CPU data.
2395 * @param iReg The register.
2396 */
2397static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2398{
2399 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2400 return *pbSrc;
2401}
2402
2403
2404/**
2405 * Fetches the value of a 16-bit general register.
2406 *
2407 * @returns The register value.
2408 * @param pIemCpu The per CPU data.
2409 * @param iReg The register.
2410 */
2411static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2412{
2413 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2414}
2415
2416
2417/**
2418 * Fetches the value of a 32-bit general register.
2419 *
2420 * @returns The register value.
2421 * @param pIemCpu The per CPU data.
2422 * @param iReg The register.
2423 */
2424static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2425{
2426 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2427}
2428
2429
2430/**
2431 * Fetches the value of a 64-bit general register.
2432 *
2433 * @returns The register value.
2434 * @param pIemCpu The per CPU data.
2435 * @param iReg The register.
2436 */
2437static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2438{
2439 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2440}
2441
2442
2443/**
2444 * Checks whether the FPU state is in FXSAVE format.
2445 *
2446 * @returns true if it is, false if it's in FNSAVE format.
2447 * @param pIemCpu The IEM per CPU data.
2448 */
2449DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2450{
2451#ifdef RT_ARCH_AMD64
2452 return true;
2453#else
2454/// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2455 return true;
2456#endif
2457}
2458
2459
2460/**
2461 * Gets the FPU status word.
2462 *
2463 * @returns FPU status word
2464 * @param pIemCpu The per CPU data.
2465 */
2466static uint16_t iemFRegFetchFsw(PIEMCPU pIemCpu)
2467{
2468 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2469 uint16_t u16Fsw;
2470 if (iemFRegIsFxSaveFormat(pIemCpu))
2471 u16Fsw = pCtx->fpu.FSW;
2472 else
2473 {
2474 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
2475 u16Fsw = pFpu->FSW;
2476 }
2477 return u16Fsw;
2478}
2479
2480/**
2481 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2482 *
2483 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2484 * segment limit.
2485 *
2486 * @param pIemCpu The per CPU data.
2487 * @param offNextInstr The offset of the next instruction.
2488 */
2489static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2490{
2491 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2492 switch (pIemCpu->enmEffOpSize)
2493 {
2494 case IEMMODE_16BIT:
2495 {
2496 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2497 if ( uNewIp > pCtx->csHid.u32Limit
2498 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2499 return iemRaiseGeneralProtectionFault0(pIemCpu);
2500 pCtx->rip = uNewIp;
2501 break;
2502 }
2503
2504 case IEMMODE_32BIT:
2505 {
2506 Assert(pCtx->rip <= UINT32_MAX);
2507 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2508
2509 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2510 if (uNewEip > pCtx->csHid.u32Limit)
2511 return iemRaiseGeneralProtectionFault0(pIemCpu);
2512 pCtx->rip = uNewEip;
2513 break;
2514 }
2515
2516 case IEMMODE_64BIT:
2517 {
2518 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2519
2520 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2521 if (!IEM_IS_CANONICAL(uNewRip))
2522 return iemRaiseGeneralProtectionFault0(pIemCpu);
2523 pCtx->rip = uNewRip;
2524 break;
2525 }
2526
2527 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2528 }
2529
2530 return VINF_SUCCESS;
2531}
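/* Worked example (illustration only): the two byte "jmp $" (EB FE) at
 * IP=0x1000 in 16-bit code has offNextInstr=-2 and offOpcode=2, so
 * uNewIp = 0x1000 + (-2) + 2 = 0x1000 and the jump spins on itself;
 * offOpcode is the number of opcode bytes consumed so far. */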
2532
2533
2534/**
2535 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2536 *
2537 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2538 * segment limit.
2539 *
2540 * @returns Strict VBox status code.
2541 * @param pIemCpu The per CPU data.
2542 * @param offNextInstr The offset of the next instruction.
2543 */
2544static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2545{
2546 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2547 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2548
2549 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2550 if ( uNewIp > pCtx->csHid.u32Limit
2551 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2552 return iemRaiseGeneralProtectionFault0(pIemCpu);
2553 /** @todo Test 16-bit jump in 64-bit mode. */
2554 pCtx->rip = uNewIp;
2555
2556 return VINF_SUCCESS;
2557}
2558
2559
2560/**
2561 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2562 *
2563 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2564 * segment limit.
2565 *
2566 * @returns Strict VBox status code.
2567 * @param pIemCpu The per CPU data.
2568 * @param offNextInstr The offset of the next instruction.
2569 */
2570static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2571{
2572 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2573 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2574
2575 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2576 {
2577 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2578
2579 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2580 if (uNewEip > pCtx->csHid.u32Limit)
2581 return iemRaiseGeneralProtectionFault0(pIemCpu);
2582 pCtx->rip = uNewEip;
2583 }
2584 else
2585 {
2586 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2587
2588 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2589 if (!IEM_IS_CANONICAL(uNewRip))
2590 return iemRaiseGeneralProtectionFault0(pIemCpu);
2591 pCtx->rip = uNewRip;
2592 }
2593 return VINF_SUCCESS;
2594}
2595
2596
2597/**
2598 * Performs a near jump to the specified address.
2599 *
2600 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2601 * segment limit.
2602 *
2603 * @param pIemCpu The per CPU data.
2604 * @param uNewRip The new RIP value.
2605 */
2606static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2607{
2608 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2609 switch (pIemCpu->enmEffOpSize)
2610 {
2611 case IEMMODE_16BIT:
2612 {
2613 Assert(uNewRip <= UINT16_MAX);
2614 if ( uNewRip > pCtx->csHid.u32Limit
2615 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2616 return iemRaiseGeneralProtectionFault0(pIemCpu);
2617 /** @todo Test 16-bit jump in 64-bit mode. */
2618 pCtx->rip = uNewRip;
2619 break;
2620 }
2621
2622 case IEMMODE_32BIT:
2623 {
2624 Assert(uNewRip <= UINT32_MAX);
2625 Assert(pCtx->rip <= UINT32_MAX);
2626 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2627
2628 if (uNewRip > pCtx->csHid.u32Limit)
2629 return iemRaiseGeneralProtectionFault0(pIemCpu);
2630 pCtx->rip = uNewRip;
2631 break;
2632 }
2633
2634 case IEMMODE_64BIT:
2635 {
2636 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2637
2638 if (!IEM_IS_CANONICAL(uNewRip))
2639 return iemRaiseGeneralProtectionFault0(pIemCpu);
2640 pCtx->rip = uNewRip;
2641 break;
2642 }
2643
2644 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2645 }
2646
2647 return VINF_SUCCESS;
2648}
2649
2650
2651/**
2652 * Gets the address of the top of the stack.
2653 *
2654 * @param pCtx The CPU context which SP/ESP/RSP should be
2655 * read.
2656 */
2657DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
2658{
2659 if (pCtx->ssHid.Attr.n.u1Long)
2660 return pCtx->rsp;
2661 if (pCtx->ssHid.Attr.n.u1DefBig)
2662 return pCtx->esp;
2663 return pCtx->sp;
2664}
2665
2666
2667/**
2668 * Updates the RIP/EIP/IP to point to the next instruction.
2669 *
2670 * @param pIemCpu The per CPU data.
2671 * @param cbInstr The number of bytes to add.
2672 */
2673static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
2674{
2675 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2676 switch (pIemCpu->enmCpuMode)
2677 {
2678 case IEMMODE_16BIT:
2679 Assert(pCtx->rip <= UINT16_MAX);
2680 pCtx->eip += cbInstr;
2681 pCtx->eip &= UINT32_C(0xffff);
2682 break;
2683
2684 case IEMMODE_32BIT:
2685 pCtx->eip += cbInstr;
2686 Assert(pCtx->rip <= UINT32_MAX);
2687 break;
2688
2689 case IEMMODE_64BIT:
2690 pCtx->rip += cbInstr;
2691 break;
2692 default: AssertFailed();
2693 }
2694}
2695
2696
2697/**
2698 * Updates the RIP/EIP/IP to point to the next instruction.
2699 *
2700 * @param pIemCpu The per CPU data.
2701 */
2702static void iemRegUpdateRip(PIEMCPU pIemCpu)
2703{
2704 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
2705}
2706
2707
2708/**
2709 * Adds to the stack pointer.
2710 *
2711 * @param pCtx The CPU context which SP/ESP/RSP should be
2712 * updated.
2713 * @param cbToAdd The number of bytes to add.
2714 */
2715DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
2716{
2717 if (pCtx->ssHid.Attr.n.u1Long)
2718 pCtx->rsp += cbToAdd;
2719 else if (pCtx->ssHid.Attr.n.u1DefBig)
2720 pCtx->esp += cbToAdd;
2721 else
2722 pCtx->sp += cbToAdd;
2723}
2724
2725
2726/**
2727 * Subtracts from the stack pointer.
2728 *
2729 * @param pCtx The CPU context which SP/ESP/RSP should be
2730 * updated.
2731 * @param cbToSub The number of bytes to subtract.
2732 */
2733DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
2734{
2735 if (pCtx->ssHid.Attr.n.u1Long)
2736 pCtx->rsp -= cbToSub;
2737 else if (pCtx->ssHid.Attr.n.u1DefBig)
2738 pCtx->esp -= cbToSub;
2739 else
2740 pCtx->sp -= cbToSub;
2741}
2742
2743
2744/**
2745 * Adds to the temporary stack pointer.
2746 *
2747 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2748 * @param cbToAdd The number of bytes to add.
2749 * @param pCtx Where to get the current stack mode.
2750 */
2751DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
2752{
2753 if (pCtx->ssHid.Attr.n.u1Long)
2754 pTmpRsp->u += cbToAdd;
2755 else if (pCtx->ssHid.Attr.n.u1DefBig)
2756 pTmpRsp->DWords.dw0 += cbToAdd;
2757 else
2758 pTmpRsp->Words.w0 += cbToAdd;
2759}
2760
2761
2762/**
2763 * Subtracts from the temporary stack pointer.
2764 *
2765 * @param pTmpRsp The temporary SP/ESP/RSP to update.
2766 * @param cbToSub The number of bytes to subtract.
2767 * @param pCtx Where to get the current stack mode.
2768 */
2769DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
2770{
2771 if (pCtx->ssHid.Attr.n.u1Long)
2772 pTmpRsp->u -= cbToSub;
2773 else if (pCtx->ssHid.Attr.n.u1DefBig)
2774 pTmpRsp->DWords.dw0 -= cbToSub;
2775 else
2776 pTmpRsp->Words.w0 -= cbToSub;
2777}
2778
2779
2780/**
2781 * Calculates the effective stack address for a push of the specified size as
2782 * well as the new RSP value (upper bits may be masked).
2783 *
2784 * @returns Effective stack address for the push.
2785 * @param pCtx Where to get the current stack mode.
2786 * @param cbItem The size of the stack item to push.
2787 * @param puNewRsp Where to return the new RSP value.
2788 */
2789DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
2790{
2791 RTUINT64U uTmpRsp;
2792 RTGCPTR GCPtrTop;
2793 uTmpRsp.u = pCtx->rsp;
2794
2795 if (pCtx->ssHid.Attr.n.u1Long)
2796 GCPtrTop = uTmpRsp.u -= cbItem;
2797 else if (pCtx->ssHid.Attr.n.u1DefBig)
2798 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
2799 else
2800 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
2801 *puNewRsp = uTmpRsp.u;
2802 return GCPtrTop;
2803}
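/* Illustration of the stack width handling above: with a 32-bit "big" stack
 * segment and RSP=0x10, pushing a 4 byte item returns GCPtrTop=0xC and
 * *puNewRsp=0xC; only the low dword of the RTUINT64U is decremented, which
 * is how ESP wraps within a 32-bit stack. A 16-bit stack only touches SP,
 * while a long mode stack decrements the full 64-bit RSP. */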
2804
2805
2806/**
2807 * Gets the current stack pointer and calculates the value after a pop of the
2808 * specified size.
2809 *
2810 * @returns Current stack pointer.
2811 * @param pCtx Where to get the current stack mode.
2812 * @param cbItem The size of the stack item to pop.
2813 * @param puNewRsp Where to return the new RSP value.
2814 */
2815DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
2816{
2817 RTUINT64U uTmpRsp;
2818 RTGCPTR GCPtrTop;
2819 uTmpRsp.u = pCtx->rsp;
2820
2821 if (pCtx->ssHid.Attr.n.u1Long)
2822 {
2823 GCPtrTop = uTmpRsp.u;
2824 uTmpRsp.u += cbItem;
2825 }
2826 else if (pCtx->ssHid.Attr.n.u1DefBig)
2827 {
2828 GCPtrTop = uTmpRsp.DWords.dw0;
2829 uTmpRsp.DWords.dw0 += cbItem;
2830 }
2831 else
2832 {
2833 GCPtrTop = uTmpRsp.Words.w0;
2834 uTmpRsp.Words.w0 += cbItem;
2835 }
2836 *puNewRsp = uTmpRsp.u;
2837 return GCPtrTop;
2838}
2839
2840
2841/**
2842 * Calculates the effective stack address for a push of the specified size as
2843 * well as the new temporary RSP value (upper bits may be masked).
2844 *
2845 * @returns Effective stack address for the push.
2846 * @param pTmpRsp The temporary stack pointer. This is updated.
2847 * @param cbItem The size of the stack item to push.
2848 * @param pCtx Where to get the current stack mode.
2849 */
2850DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
2851{
2852 RTGCPTR GCPtrTop;
2853
2854 if (pCtx->ssHid.Attr.n.u1Long)
2855 GCPtrTop = pTmpRsp->u -= cbItem;
2856 else if (pCtx->ssHid.Attr.n.u1DefBig)
2857 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
2858 else
2859 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
2860 return GCPtrTop;
2861}
2862
2863
2864/**
2865 * Gets the effective stack address for a pop of the specified size and
2866 * calculates and updates the temporary RSP.
2867 *
2868 * @returns Current stack pointer.
2869 * @param pTmpRsp The temporary stack pointer. This is updated.
2870 * @param pCtx Where to get the current stack mode.
2871 * @param cbItem The size of the stack item to pop.
2872 */
2873DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
2874{
2875 RTGCPTR GCPtrTop;
2876 if (pCtx->ssHid.Attr.n.u1Long)
2877 {
2878 GCPtrTop = pTmpRsp->u;
2879 pTmpRsp->u += cbItem;
2880 }
2881 else if (pCtx->ssHid.Attr.n.u1DefBig)
2882 {
2883 GCPtrTop = pTmpRsp->DWords.dw0;
2884 pTmpRsp->DWords.dw0 += cbItem;
2885 }
2886 else
2887 {
2888 GCPtrTop = pTmpRsp->Words.w0;
2889 pTmpRsp->Words.w0 += cbItem;
2890 }
2891 return GCPtrTop;
2892}
2893
2894
2895/**
2896 * Checks if an Intel CPUID feature bit is set.
2897 *
2898 * @returns true / false.
2899 *
2900 * @param pIemCpu The IEM per CPU data.
2901 * @param fEdx The EDX bit to test, or 0 if ECX.
2902 * @param fEcx The ECX bit to test, or 0 if EDX.
2903 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
2904 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
2905 */
2906static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
2907{
2908 uint32_t uEax, uEbx, uEcx, uEdx;
2909 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
2910 return (fEcx && (uEcx & fEcx))
2911 || (fEdx && (uEdx & fEdx));
2912}
2913
2914
2915/**
2916 * Checks if an AMD CPUID feature bit is set.
2917 *
2918 * @returns true / false.
2919 *
2920 * @param pIemCpu The IEM per CPU data.
2921 * @param fEdx The EDX bit to test, or 0 if ECX.
2922 * @param fEcx The ECX bit to test, or 0 if EDX.
2923 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
2924 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
2925 */
2926static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
2927{
2928 uint32_t uEax, uEbx, uEcx, uEdx;
2929 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
2930 return (fEcx && (uEcx & fEcx))
2931 || (fEdx && (uEdx & fEdx));
2932}
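/* Usage sketch (hypothetical values, for illustration only): an opcode
 * handler could gate an AMD-only instruction on leaf 0x80000001 like this:
 *     if (!iemRegIsAmdCpuIdFeaturePresent(pIemCpu, RT_BIT_32(31), 0))
 *         return IEMOP_RAISE_INVALID_OPCODE();
 * Here bit 31 of EDX is the 3DNow! bit and fEcx is 0 because only an EDX
 * bit is tested; see the IEM_IS_AMD_CPUID_FEATURE_PRESENT_* macros. */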
2933
2934/** @} */
2935
2936
2937/** @name Memory access.
2938 *
2939 * @{
2940 */
2941
2942
2943/**
2944 * Checks if the given segment can be written to, raising the appropriate
2945 * exception if not.
2946 *
2947 * @returns VBox strict status code.
2948 *
2949 * @param pIemCpu The IEM per CPU data.
2950 * @param pHid Pointer to the hidden register.
2951 * @param iSegReg The register number.
2952 */
2953static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
2954{
2955 if (!pHid->Attr.n.u1Present)
2956 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
2957
2958 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
2959 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
2960 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
2961 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
2962
2963 /** @todo DPL/RPL/CPL? */
2964
2965 return VINF_SUCCESS;
2966}
2967
2968
2969/**
2970 * Checks if the given segment can be read from, raising the appropriate
2971 * exception if not.
2972 *
2973 * @returns VBox strict status code.
2974 *
2975 * @param pIemCpu The IEM per CPU data.
2976 * @param pHid Pointer to the hidden register.
2977 * @param iSegReg The register number.
2978 */
2979static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
2980{
2981 if (!pHid->Attr.n.u1Present)
2982 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
2983
2984 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
2985 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
2986 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
2987
2988 /** @todo DPL/RPL/CPL? */
2989
2990 return VINF_SUCCESS;
2991}
2992
2993
2994/**
2995 * Applies the segment limit, base and attributes.
2996 *
2997 * This may raise a \#GP or \#SS.
2998 *
2999 * @returns VBox strict status code.
3000 *
3001 * @param pIemCpu The IEM per CPU data.
3002 * @param fAccess The kind of access which is being performed.
3003 * @param iSegReg The index of the segment register to apply.
3004 * This is UINT8_MAX if none (for IDT, GDT, LDT,
3005 * TSS, ++).
3006 * @param pGCPtrMem Pointer to the guest memory address to apply
3007 * segmentation to. Input and output parameter.
3008 */
3009static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
3010 size_t cbMem, PRTGCPTR pGCPtrMem)
3011{
3012 if (iSegReg == UINT8_MAX)
3013 return VINF_SUCCESS;
3014
3015 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
3016 switch (pIemCpu->enmCpuMode)
3017 {
3018 case IEMMODE_16BIT:
3019 case IEMMODE_32BIT:
3020 {
3021 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
3022 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
3023
3024 Assert(pSel->Attr.n.u1Present);
3025 Assert(pSel->Attr.n.u1DescType);
3026 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
3027 {
3028 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3029 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3030 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3031
3032 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3033 {
3034 /** @todo CPL check. */
3035 }
3036
3037 /*
3038 * There are two kinds of data selectors, normal and expand down.
3039 */
3040 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
3041 {
3042 if ( GCPtrFirst32 > pSel->u32Limit
3043 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3044 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3045
3046 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3047 }
3048 else
3049 {
3050 /** @todo implement expand down segments. */
3051 AssertFailed(/** @todo implement this */);
3052 return VERR_NOT_IMPLEMENTED;
3053 }
3054 }
3055 else
3056 {
3057
3058 /*
3059 * Code selectors can usually be read through; writing is
3060 * only permitted in real and V8086 mode.
3061 */
3062 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3063 || ( (fAccess & IEM_ACCESS_TYPE_READ)
3064 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
3065 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
3066 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3067
3068 if ( GCPtrFirst32 > pSel->u32Limit
3069 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3070 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3071
3072 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3073 {
3074 /** @todo CPL check. */
3075 }
3076
3077 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3078 }
3079 return VINF_SUCCESS;
3080 }
3081
3082 case IEMMODE_64BIT:
3083 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
3084 *pGCPtrMem += pSel->u64Base;
3085 return VINF_SUCCESS;
3086
3087 default:
3088 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
3089 }
3090}
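/* Worked example (illustration only): a 4 byte write through DS with
 * base=0x10000 and limit=0xFFFF (not expand-down) at GCPtrMem=0xFFFE gives
 * GCPtrLast32=0x10001 > limit and is rejected via iemRaiseSelectorBounds;
 * the same access at GCPtrMem=0x1234 passes and *pGCPtrMem becomes 0x11234
 * once the base is added. In 64-bit mode only FS and GS bases are applied. */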
3091
3092
3093/**
3094 * Translates a virtual address to a physical address and checks if we
3095 * can access the page as specified.
3096 *
3097 * @param pIemCpu The IEM per CPU data.
3098 * @param GCPtrMem The virtual address.
3099 * @param fAccess The intended access.
3100 * @param pGCPhysMem Where to return the physical address.
3101 */
3102static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
3103 PRTGCPHYS pGCPhysMem)
3104{
3105 /** @todo Need a different PGM interface here. We're currently using
3106 * generic / REM interfaces. This won't cut it for R0 & RC. */
3107 RTGCPHYS GCPhys;
3108 uint64_t fFlags;
3109 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
3110 if (RT_FAILURE(rc))
3111 {
3112 /** @todo Check unassigned memory in unpaged mode. */
3113 *pGCPhysMem = NIL_RTGCPHYS;
3114 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
3115 }
3116
3117 if ( (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)
3118 && ( ( (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read only memory? */
3119 && !(fFlags & X86_PTE_RW)
3120 && ( pIemCpu->uCpl != 0
3121 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )
3122 || ( !(fFlags & X86_PTE_US) /* Kernel memory */
3123 && pIemCpu->uCpl == 3)
3124 || ( (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */
3125 && (fFlags & X86_PTE_PAE_NX)
3126 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
3127 )
3128 )
3129 {
3130 *pGCPhysMem = NIL_RTGCPHYS;
3131 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3132 }
3133
3134 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
3135 *pGCPhysMem = GCPhys;
3136 return VINF_SUCCESS;
3137}
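/* Summary of the permission checks above: a write to a read-only page faults
 * unless it is made at CPL 0 with CR0.WP clear; any CPL 3 access to a
 * supervisor page faults; an instruction fetch from a page with the NX bit
 * faults when EFER.NXE is enabled. On success the page offset is merged back
 * into the returned physical address. */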
3138
3139
3140
3141/**
3142 * Maps a physical page.
3143 *
3144 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3145 * @param pIemCpu The IEM per CPU data.
3146 * @param GCPhysMem The physical address.
3147 * @param fAccess The intended access.
3148 * @param ppvMem Where to return the mapping address.
3149 */
3150static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
3151{
3152#ifdef IEM_VERIFICATION_MODE
3153 /* Force the alternative path so we can ignore writes. */
3154 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
3155 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3156#endif
3157
3158 /*
3159 * If we can map the page without trouble, do block processing
3160 * until the end of the current page.
3161 */
3162 /** @todo need some better API. */
3163 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
3164 GCPhysMem,
3165 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3166 ppvMem);
3167}
3168
3169
3170/**
3171 * Looks up a memory mapping entry.
3172 *
3173 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
3174 * @param pIemCpu The IEM per CPU data.
3175 * @param pvMem The memory address.
3176 * @param fAccess The access to look up.
3177 */
3178DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3179{
3180 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
3181 if ( pIemCpu->aMemMappings[0].pv == pvMem
3182 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3183 return 0;
3184 if ( pIemCpu->aMemMappings[1].pv == pvMem
3185 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3186 return 1;
3187 if ( pIemCpu->aMemMappings[2].pv == pvMem
3188 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3189 return 2;
3190 return VERR_NOT_FOUND;
3191}
3192
3193
3194/**
3195 * Finds a free memmap entry when using iNextMapping doesn't work.
3196 *
3197 * @returns Memory mapping index, 1024 on failure.
3198 * @param pIemCpu The IEM per CPU data.
3199 */
3200static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
3201{
3202 /*
3203 * The easy case.
3204 */
3205 if (pIemCpu->cActiveMappings == 0)
3206 {
3207 pIemCpu->iNextMapping = 1;
3208 return 0;
3209 }
3210
3211 /* There should be enough mappings for all instructions. */
3212 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
3213
3214 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
3215 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
3216 return i;
3217
3218 AssertFailedReturn(1024);
3219}
3220
3221
3222/**
3223 * Commits a bounce buffer that needs writing back and unmaps it.
3224 *
3225 * @returns Strict VBox status code.
3226 * @param pIemCpu The IEM per CPU data.
3227 * @param iMemMap The index of the buffer to commit.
3228 */
3229static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
3230{
3231 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
3232 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
3233
3234 /*
3235 * Do the writing.
3236 */
3237 int rc;
3238 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
3239 && !IEM_VERIFICATION_ENABLED(pIemCpu))
3240 {
3241 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3242 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3243 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3244 if (!pIemCpu->fByPassHandlers)
3245 {
3246 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3247 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3248 pbBuf,
3249 cbFirst);
3250 if (cbSecond && rc == VINF_SUCCESS)
3251 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3252 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3253 pbBuf + cbFirst,
3254 cbSecond);
3255 }
3256 else
3257 {
3258 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3259 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3260 pbBuf,
3261 cbFirst);
3262 if (cbSecond && rc == VINF_SUCCESS)
3263 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3264 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3265 pbBuf + cbFirst,
3266 cbSecond);
3267 }
3268 }
3269 else
3270 rc = VINF_SUCCESS;
3271
3272#ifdef IEM_VERIFICATION_MODE
3273 /*
3274 * Record the write(s).
3275 */
3276 if (!pIemCpu->fNoRem)
3277 {
3278 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3279 if (pEvtRec)
3280 {
3281 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3282 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
3283 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3284 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
3285 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3286 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3287 }
3288 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
3289 {
3290 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3291 if (pEvtRec)
3292 {
3293 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3294 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
3295 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3296 memcpy(pEvtRec->u.RamWrite.ab,
3297 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
3298 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
3299 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3300 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3301 }
3302 }
3303 }
3304#endif
3305
3306 /*
3307 * Free the mapping entry.
3308 */
3309 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
3310 Assert(pIemCpu->cActiveMappings != 0);
3311 pIemCpu->cActiveMappings--;
3312 return rc;
3313}
3314
3315
3316/**
3317 * iemMemMap worker that deals with a request crossing pages.
3318 */
3319static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
3320 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
3321{
3322 /*
3323 * Do the address translations.
3324 */
3325 RTGCPHYS GCPhysFirst;
3326 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
3327 if (rcStrict != VINF_SUCCESS)
3328 return rcStrict;
3329
3330 RTGCPHYS GCPhysSecond;
3331 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
3332 if (rcStrict != VINF_SUCCESS)
3333 return rcStrict;
3334 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
3335
3336 /*
3337 * Read in the current memory content if it's a read or execute access.
3338 */
3339 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3340 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
3341 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
3342
3343 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
3344 {
3345 int rc;
3346 if (!pIemCpu->fByPassHandlers)
3347 {
3348 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
3349 if (rc != VINF_SUCCESS)
3350 return rc;
3351 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
3352 if (rc != VINF_SUCCESS)
3353 return rc;
3354 }
3355 else
3356 {
3357 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
3358 if (rc != VINF_SUCCESS)
3359 return rc;
3360 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
3361 if (rc != VINF_SUCCESS)
3362 return rc;
3363 }
3364
3365#ifdef IEM_VERIFICATION_MODE
3366 if (!pIemCpu->fNoRem)
3367 {
3368 /*
3369 * Record the reads.
3370 */
3371 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3372 if (pEvtRec)
3373 {
3374 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
3375 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
3376 pEvtRec->u.RamRead.cb = cbFirstPage;
3377 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3378 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3379 }
3380 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3381 if (pEvtRec)
3382 {
3383 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
3384 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
3385 pEvtRec->u.RamRead.cb = cbSecondPage;
3386 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3387 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3388 }
3389 }
3390#endif
3391 }
3392#ifdef VBOX_STRICT
3393 else
3394 memset(pbBuf, 0xcc, cbMem);
3395#endif
3396#ifdef VBOX_STRICT
3397 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
3398 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
3399#endif
3400
3401 /*
3402 * Commit the bounce buffer entry.
3403 */
3404 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
3405 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
3406 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
3407 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
3408 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
3409 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
3410 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
3411 pIemCpu->cActiveMappings++;
3412
3413 *ppvMem = pbBuf;
3414 return VINF_SUCCESS;
3415}
3416
3417
3418/**
3419 * iemMemMap worker that deals with iemMemPageMap failures.
3420 */
3421static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
3422 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
3423{
3424 /*
3425 * Filter out conditions we can handle and the ones which shouldn't happen.
3426 */
3427 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
3428 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
3429 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
3430 {
3431 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
3432 return rcMap;
3433 }
3434 pIemCpu->cPotentialExits++;
3435
3436 /*
3437 * Read in the current memory content if it's a read or execute access.
3438 */
3439 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
3440 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
3441 {
3442 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
3443 memset(pbBuf, 0xff, cbMem);
3444 else
3445 {
3446 int rc;
3447 if (!pIemCpu->fByPassHandlers)
3448 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
3449 else
3450 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
3451 if (rc != VINF_SUCCESS)
3452 return rc;
3453 }
3454
3455#ifdef IEM_VERIFICATION_MODE
3456 if (!pIemCpu->fNoRem)
3457 {
3458 /*
3459 * Record the read.
3460 */
3461 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3462 if (pEvtRec)
3463 {
3464 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
3465 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
3466 pEvtRec->u.RamRead.cb = cbMem;
3467 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3468 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3469 }
3470 }
3471#endif
3472 }
3473#ifdef VBOX_STRICT
3474 else
3475 memset(pbBuf, 0xcc, cbMem);
3476#endif
3477#ifdef VBOX_STRICT
3478 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
3479 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
3480#endif
3481
3482 /*
3483 * Commit the bounce buffer entry.
3484 */
3485 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
3486 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
3487 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
3488 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
3489 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
3490 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
3491 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
3492 pIemCpu->cActiveMappings++;
3493
3494 *ppvMem = pbBuf;
3495 return VINF_SUCCESS;
3496}
3497
3498
3499
3500/**
3501 * Maps the specified guest memory for the given kind of access.
3502 *
3503 * This may be using bounce buffering of the memory if it's crossing a page
3504 * boundary or if there is an access handler installed for any of it. Because
3505 * of lock prefix guarantees, we're in for some extra clutter when this
3506 * happens.
3507 *
3508 * This may raise a \#GP, \#SS, \#PF or \#AC.
3509 *
3510 * @returns VBox strict status code.
3511 *
3512 * @param pIemCpu The IEM per CPU data.
3513 * @param ppvMem Where to return the pointer to the mapped
3514 * memory.
3515 * @param cbMem The number of bytes to map. This is usually 1,
3516 * 2, 4, 6, 8, 12, 16 or 32. When used by string
3517 * operations it can be up to a page.
3518 * @param iSegReg The index of the segment register to use for
3519 * this access. The base and limits are checked.
3520 * Use UINT8_MAX to indicate that no segmentation
3521 * is required (for IDT, GDT and LDT accesses).
3522 * @param GCPtrMem The address of the guest memory.
3523 * @param fAccess How the memory is being accessed. The
3524 * IEM_ACCESS_TYPE_XXX bit is used to figure out
3525 * how to map the memory, while the
3526 * IEM_ACCESS_WHAT_XXX bit is used when raising
3527 * exceptions.
3528 */
3529static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
3530{
3531 /*
3532 * Check the input and figure out which mapping entry to use.
3533 */
3534 Assert(cbMem <= 32);
3535 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
3536
3537 unsigned iMemMap = pIemCpu->iNextMapping;
3538 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
3539 {
3540 iMemMap = iemMemMapFindFree(pIemCpu);
3541 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
3542 }
3543
3544 /*
3545 * Map the memory, checking that we can actually access it. If something
3546 * slightly complicated happens, fall back on bounce buffering.
3547 */
3548 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
3549 if (rcStrict != VINF_SUCCESS)
3550 return rcStrict;
3551
3552 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
3553 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
3554
3555 RTGCPHYS GCPhysFirst;
3556 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
3557 if (rcStrict != VINF_SUCCESS)
3558 return rcStrict;
3559
3560 void *pvMem;
3561 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
3562 if (rcStrict != VINF_SUCCESS)
3563 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
3564
3565 /*
3566 * Fill in the mapping table entry.
3567 */
3568 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
3569 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
3570 pIemCpu->iNextMapping = iMemMap + 1;
3571 pIemCpu->cActiveMappings++;
3572
3573 *ppvMem = pvMem;
3574 return VINF_SUCCESS;
3575}
3576
3577
3578/**
3579 * Commits the guest memory if bounce buffered and unmaps it.
3580 *
3581 * @returns Strict VBox status code.
3582 * @param pIemCpu The IEM per CPU data.
3583 * @param pvMem The mapping.
3584 * @param fAccess The kind of access.
3585 */
3586static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3587{
3588 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
3589 AssertReturn(iMemMap >= 0, iMemMap);
3590
3591 /*
3592 * If it's bounce buffered, we need to write back the buffer.
3593 */
3594 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
3595 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
3596 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
3597
3598 /* Free the entry. */
3599 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
3600 Assert(pIemCpu->cActiveMappings != 0);
3601 pIemCpu->cActiveMappings--;
3602 return VINF_SUCCESS;
3603}
3604
3605
3606/**
3607 * Fetches a data byte.
3608 *
3609 * @returns Strict VBox status code.
3610 * @param pIemCpu The IEM per CPU data.
3611 * @param pu8Dst Where to return the byte.
3612 * @param iSegReg The index of the segment register to use for
3613 * this access. The base and limits are checked.
3614 * @param GCPtrMem The address of the guest memory.
3615 */
3616static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3617{
3618 /* The lazy approach for now... */
3619 uint8_t const *pu8Src;
3620 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3621 if (rc == VINF_SUCCESS)
3622 {
3623 *pu8Dst = *pu8Src;
3624 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
3625 }
3626 return rc;
3627}
3628
3629
3630/**
3631 * Fetches a data word.
3632 *
3633 * @returns Strict VBox status code.
3634 * @param pIemCpu The IEM per CPU data.
3635 * @param pu16Dst Where to return the word.
3636 * @param iSegReg The index of the segment register to use for
3637 * this access. The base and limits are checked.
3638 * @param GCPtrMem The address of the guest memory.
3639 */
3640static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3641{
3642 /* The lazy approach for now... */
3643 uint16_t const *pu16Src;
3644 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3645 if (rc == VINF_SUCCESS)
3646 {
3647 *pu16Dst = *pu16Src;
3648 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
3649 }
3650 return rc;
3651}
3652
3653
3654/**
3655 * Fetches a data dword.
3656 *
3657 * @returns Strict VBox status code.
3658 * @param pIemCpu The IEM per CPU data.
3659 * @param pu32Dst Where to return the dword.
3660 * @param iSegReg The index of the segment register to use for
3661 * this access. The base and limits are checked.
3662 * @param GCPtrMem The address of the guest memory.
3663 */
3664static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3665{
3666 /* The lazy approach for now... */
3667 uint32_t const *pu32Src;
3668 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3669 if (rc == VINF_SUCCESS)
3670 {
3671 *pu32Dst = *pu32Src;
3672 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
3673 }
3674 return rc;
3675}
3676
3677
3678/**
3679 * Fetches a data dword and sign extends it to a qword.
3680 *
3681 * @returns Strict VBox status code.
3682 * @param pIemCpu The IEM per CPU data.
3683 * @param pu64Dst Where to return the sign extended value.
3684 * @param iSegReg The index of the segment register to use for
3685 * this access. The base and limits are checked.
3686 * @param GCPtrMem The address of the guest memory.
3687 */
3688static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3689{
3690 /* The lazy approach for now... */
3691 int32_t const *pi32Src;
3692 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3693 if (rc == VINF_SUCCESS)
3694 {
3695 *pu64Dst = *pi32Src;
3696 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
3697 }
3698#ifdef __GNUC__ /* warning: GCC may be a royal pain */
3699 else
3700 *pu64Dst = 0;
3701#endif
3702 return rc;
3703}
3704
3705
3706/**
3707 * Fetches a data qword.
3708 *
3709 * @returns Strict VBox status code.
3710 * @param pIemCpu The IEM per CPU data.
3711 * @param pu64Dst Where to return the qword.
3712 * @param iSegReg The index of the segment register to use for
3713 * this access. The base and limits are checked.
3714 * @param GCPtrMem The address of the guest memory.
3715 */
3716static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
3717{
3718 /* The lazy approach for now... */
3719 uint64_t const *pu64Src;
3720 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
3721 if (rc == VINF_SUCCESS)
3722 {
3723 *pu64Dst = *pu64Src;
3724 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
3725 }
3726 return rc;
3727}
3728
3729
3730/**
3731 * Fetches a descriptor register (lgdt, lidt).
3732 *
3733 * @returns Strict VBox status code.
3734 * @param pIemCpu The IEM per CPU data.
3735 * @param pcbLimit Where to return the limit.
3736 * @param pGCPtrBase Where to return the base.
3737 * @param iSegReg The index of the segment register to use for
3738 * this access. The base and limits are checked.
3739 * @param GCPtrMem The address of the guest memory.
3740 * @param enmOpSize The effective operand size.
3741 */
3742static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
3743 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
3744{
3745 uint8_t const *pu8Src;
3746 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
3747 (void **)&pu8Src,
3748 enmOpSize == IEMMODE_64BIT
3749 ? 2 + 8
3750 : enmOpSize == IEMMODE_32BIT
3751 ? 2 + 4
3752 : 2 + 3,
3753 iSegReg,
3754 GCPtrMem,
3755 IEM_ACCESS_DATA_R);
3756 if (rcStrict == VINF_SUCCESS)
3757 {
3758 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
3759 switch (enmOpSize)
3760 {
3761 case IEMMODE_16BIT:
3762 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
3763 break;
3764 case IEMMODE_32BIT:
3765 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
3766 break;
3767 case IEMMODE_64BIT:
3768 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
3769 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
3770 break;
3771
3772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3773 }
3774 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
3775 }
3776 return rcStrict;
3777}
3778
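/**
 * Example (illustrative sketch only, not part of the interpreter): how a
 * caller such as an lgdt-style implementation might consume
 * iemMemFetchDataXdtr. The function name is made up, and the bare assignment
 * to the GDTR is a simplification; real code performs additional checks and
 * goes through the proper register update paths.
 */
#if 0
static VBOXSTRICTRC iemExampleLoadGdtr(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, enmOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        /* Commit the limit and base fetched from guest memory. */
        pIemCpu->CTX_SUFF(pCtx)->gdtr.cbGdt = cbLimit;
        pIemCpu->CTX_SUFF(pCtx)->gdtr.pGdt  = GCPtrBase;
    }
    return rcStrict;
}
#endif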
3779
3780
3781/**
3782 * Stores a data byte.
3783 *
3784 * @returns Strict VBox status code.
3785 * @param pIemCpu The IEM per CPU data.
3786 * @param iSegReg The index of the segment register to use for
3787 * this access. The base and limits are checked.
3788 * @param GCPtrMem The address of the guest memory.
3789 * @param u8Value The value to store.
3790 */
3791static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
3792{
3793 /* The lazy approach for now... */
3794 uint8_t *pu8Dst;
3795 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3796 if (rc == VINF_SUCCESS)
3797 {
3798 *pu8Dst = u8Value;
3799 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
3800 }
3801 return rc;
3802}
3803
3804
3805/**
3806 * Stores a data word.
3807 *
3808 * @returns Strict VBox status code.
3809 * @param pIemCpu The IEM per CPU data.
3810 * @param iSegReg The index of the segment register to use for
3811 * this access. The base and limits are checked.
3812 * @param GCPtrMem The address of the guest memory.
3813 * @param u16Value The value to store.
3814 */
3815static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
3816{
3817 /* The lazy approach for now... */
3818 uint16_t *pu16Dst;
3819 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3820 if (rc == VINF_SUCCESS)
3821 {
3822 *pu16Dst = u16Value;
3823 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
3824 }
3825 return rc;
3826}
3827
3828
3829/**
3830 * Stores a data dword.
3831 *
3832 * @returns Strict VBox status code.
3833 * @param pIemCpu The IEM per CPU data.
3834 * @param iSegReg The index of the segment register to use for
3835 * this access. The base and limits are checked.
3836 * @param GCPtrMem The address of the guest memory.
3837 * @param u32Value The value to store.
3838 */
3839static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
3840{
3841 /* The lazy approach for now... */
3842 uint32_t *pu32Dst;
3843 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3844 if (rc == VINF_SUCCESS)
3845 {
3846 *pu32Dst = u32Value;
3847 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
3848 }
3849 return rc;
3850}
3851
3852
3853/**
3854 * Stores a data qword.
3855 *
3856 * @returns Strict VBox status code.
3857 * @param pIemCpu The IEM per CPU data.
3858 * @param iSegReg The index of the segment register to use for
3859 * this access. The base and limits are checked.
3860 * @param GCPtrMem The address of the guest memory.
3861 * @param u64Value The value to store.
3862 */
3863static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
3864{
3865 /* The lazy approach for now... */
3866 uint64_t *pu64Dst;
3867 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
3868 if (rc == VINF_SUCCESS)
3869 {
3870 *pu64Dst = u64Value;
3871 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
3872 }
3873 return rc;
3874}
3875
3876
3877/**
3878 * Pushes a word onto the stack.
3879 *
3880 * @returns Strict VBox status code.
3881 * @param pIemCpu The IEM per CPU data.
3882 * @param u16Value The value to push.
3883 */
3884static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
3885{
3886 /* Decrement the stack pointer. */
3887 uint64_t uNewRsp;
3888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3889 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
3890
3891 /* Write the word the lazy way. */
3892 uint16_t *pu16Dst;
3893 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3894 if (rc == VINF_SUCCESS)
3895 {
3896 *pu16Dst = u16Value;
3897 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
3898 }
3899
3900 /* Commit the new RSP value unless an access handler made trouble. */
3901 if (rc == VINF_SUCCESS)
3902 pCtx->rsp = uNewRsp;
3903
3904 return rc;
3905}
3906
3907
3908/**
3909 * Pushes a dword onto the stack.
3910 *
3911 * @returns Strict VBox status code.
3912 * @param pIemCpu The IEM per CPU data.
3913 * @param u32Value The value to push.
3914 */
3915static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
3916{
3917 /* Decrement the stack pointer. */
3918 uint64_t uNewRsp;
3919 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3920 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
3921
3922 /* Write the dword the lazy way. */
3923 uint32_t *pu32Dst;
3924 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3925 if (rc == VINF_SUCCESS)
3926 {
3927 *pu32Dst = u32Value;
3928 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
3929 }
3930
3931 /* Commit the new RSP value unless an access handler made trouble. */
3932 if (rc == VINF_SUCCESS)
3933 pCtx->rsp = uNewRsp;
3934
3935 return rc;
3936}
3937
3938
3939/**
3940 * Pushes a qword onto the stack.
3941 *
3942 * @returns Strict VBox status code.
3943 * @param pIemCpu The IEM per CPU data.
3944 * @param u64Value The value to push.
3945 */
3946static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
3947{
3948 /* Decrement the stack pointer. */
3949 uint64_t uNewRsp;
3950 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3951 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
3952
3953 /* Write the qword the lazy way. */
3954 uint64_t *pu64Dst;
3955 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
3956 if (rc == VINF_SUCCESS)
3957 {
3958 *pu64Dst = u64Value;
3959 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
3960 }
3961
3962 /* Commit the new RSP value unless an access handler made trouble. */
3963 if (rc == VINF_SUCCESS)
3964 pCtx->rsp = uNewRsp;
3965
3966 return rc;
3967}
3968
3969
3970/**
3971 * Pops a word from the stack.
3972 *
3973 * @returns Strict VBox status code.
3974 * @param pIemCpu The IEM per CPU data.
3975 * @param pu16Value Where to store the popped value.
3976 */
3977static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
3978{
3979 /* Increment the stack pointer. */
3980 uint64_t uNewRsp;
3981 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3982 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
3983
3984 /* Read the word the lazy way. */
3985 uint16_t const *pu16Src;
3986 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
3987 if (rc == VINF_SUCCESS)
3988 {
3989 *pu16Value = *pu16Src;
3990 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
3991
3992 /* Commit the new RSP value. */
3993 if (rc == VINF_SUCCESS)
3994 pCtx->rsp = uNewRsp;
3995 }
3996
3997 return rc;
3998}
3999
4000
4001/**
4002 * Pops a dword from the stack.
4003 *
4004 * @returns Strict VBox status code.
4005 * @param pIemCpu The IEM per CPU data.
4006 * @param pu32Value Where to store the popped value.
4007 */
4008static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
4009{
4010 /* Increment the stack pointer. */
4011 uint64_t uNewRsp;
4012 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4013 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
4014
4015 /* Read the dword the lazy way. */
4016 uint32_t const *pu32Src;
4017 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4018 if (rc == VINF_SUCCESS)
4019 {
4020 *pu32Value = *pu32Src;
4021 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4022
4023 /* Commit the new RSP value. */
4024 if (rc == VINF_SUCCESS)
4025 pCtx->rsp = uNewRsp;
4026 }
4027
4028 return rc;
4029}
4030
4031
4032/**
4033 * Pops a qword from the stack.
4034 *
4035 * @returns Strict VBox status code.
4036 * @param pIemCpu The IEM per CPU data.
4037 * @param pu64Value Where to store the popped value.
4038 */
4039static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
4040{
4041 /* Increment the stack pointer. */
4042 uint64_t uNewRsp;
4043 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4044 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
4045
4046 /* Read the qword the lazy way. */
4047 uint64_t const *pu64Src;
4048 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4049 if (rc == VINF_SUCCESS)
4050 {
4051 *pu64Value = *pu64Src;
4052 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4053
4054 /* Commit the new RSP value. */
4055 if (rc == VINF_SUCCESS)
4056 pCtx->rsp = uNewRsp;
4057 }
4058
4059 return rc;
4060}
4061
4062
4063/**
4064 * Pushes a word onto the stack, using a temporary stack pointer.
4065 *
4066 * @returns Strict VBox status code.
4067 * @param pIemCpu The IEM per CPU data.
4068 * @param u16Value The value to push.
4069 * @param pTmpRsp Pointer to the temporary stack pointer.
4070 */
4071static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
4072{
4073 /* Decrement the stack pointer. */
4074 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4075 RTUINT64U NewRsp = *pTmpRsp;
4076 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
4077
4078 /* Write the word the lazy way. */
4079 uint16_t *pu16Dst;
4080 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4081 if (rc == VINF_SUCCESS)
4082 {
4083 *pu16Dst = u16Value;
4084 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4085 }
4086
4087 /* Commit the new RSP value unless an access handler made trouble. */
4088 if (rc == VINF_SUCCESS)
4089 *pTmpRsp = NewRsp;
4090
4091 return rc;
4092}
4093
4094
4095/**
4096 * Pushes a dword onto the stack, using a temporary stack pointer.
4097 *
4098 * @returns Strict VBox status code.
4099 * @param pIemCpu The IEM per CPU data.
4100 * @param u32Value The value to push.
4101 * @param pTmpRsp Pointer to the temporary stack pointer.
4102 */
4103static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
4104{
4105 /* Decrement the stack pointer. */
4106 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4107 RTUINT64U NewRsp = *pTmpRsp;
4108 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
4109
4110 /* Write the dword the lazy way. */
4111 uint32_t *pu32Dst;
4112 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4113 if (rc == VINF_SUCCESS)
4114 {
4115 *pu32Dst = u32Value;
4116 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4117 }
4118
4119 /* Commit the new RSP value unless an access handler made trouble. */
4120 if (rc == VINF_SUCCESS)
4121 *pTmpRsp = NewRsp;
4122
4123 return rc;
4124}
4125
4126
4127/**
4128 * Pushes a qword onto the stack, using a temporary stack pointer.
4129 *
4130 * @returns Strict VBox status code.
4131 * @param pIemCpu The IEM per CPU data.
4132 * @param u64Value The value to push.
4133 * @param pTmpRsp Pointer to the temporary stack pointer.
4134 */
4135static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
4136{
4137 /* Decrement the stack pointer. */
4138 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4139 RTUINT64U NewRsp = *pTmpRsp;
4140 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
4141
4142 /* Write the qword the lazy way. */
4143 uint64_t *pu64Dst;
4144 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4145 if (rc == VINF_SUCCESS)
4146 {
4147 *pu64Dst = u64Value;
4148 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4149 }
4150
4151 /* Commit the new RSP value unless an access handler made trouble. */
4152 if (rc == VINF_SUCCESS)
4153 *pTmpRsp = NewRsp;
4154
4155 return rc;
4156}
4157
4158
4159/**
4160 * Pops a word from the stack, using a temporary stack pointer.
4161 *
4162 * @returns Strict VBox status code.
4163 * @param pIemCpu The IEM per CPU data.
4164 * @param pu16Value Where to store the popped value.
4165 * @param pTmpRsp Pointer to the temporary stack pointer.
4166 */
4167static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
4168{
4169 /* Increment the stack pointer. */
4170 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4171 RTUINT64U NewRsp = *pTmpRsp;
4172 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
4173
4174 /* Read the word the lazy way. */
4175 uint16_t const *pu16Src;
4176 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4177 if (rc == VINF_SUCCESS)
4178 {
4179 *pu16Value = *pu16Src;
4180 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4181
4182 /* Commit the new RSP value. */
4183 if (rc == VINF_SUCCESS)
4184 *pTmpRsp = NewRsp;
4185 }
4186
4187 return rc;
4188}
4189
4190
4191/**
4192 * Pops a dword from the stack, using a temporary stack pointer.
4193 *
4194 * @returns Strict VBox status code.
4195 * @param pIemCpu The IEM per CPU data.
4196 * @param pu32Value Where to store the popped value.
4197 * @param pTmpRsp Pointer to the temporary stack pointer.
4198 */
4199static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
4200{
4201 /* Increment the stack pointer. */
4202 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4203 RTUINT64U NewRsp = *pTmpRsp;
4204 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
4205
4206 /* Read the dword the lazy way. */
4207 uint32_t const *pu32Src;
4208 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4209 if (rc == VINF_SUCCESS)
4210 {
4211 *pu32Value = *pu32Src;
4212 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4213
4214 /* Commit the new RSP value. */
4215 if (rc == VINF_SUCCESS)
4216 *pTmpRsp = NewRsp;
4217 }
4218
4219 return rc;
4220}
4221
4222
4223/**
4224 * Pops a qword from the stack, using a temporary stack pointer.
4225 *
4226 * @returns Strict VBox status code.
4227 * @param pIemCpu The IEM per CPU data.
4228 * @param pu64Value Where to store the popped value.
4229 * @param pTmpRsp Pointer to the temporary stack pointer.
4230 */
4231static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
4232{
4233 /* Increment the stack pointer. */
4234 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4235 RTUINT64U NewRsp = *pTmpRsp;
4236 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4237
4238 /* Read the qword the lazy way. */
4239 uint64_t const *pu64Src;
4240 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4241 if (rcStrict == VINF_SUCCESS)
4242 {
4243 *pu64Value = *pu64Src;
4244 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4245
4246 /* Commit the new RSP value. */
4247 if (rcStrict == VINF_SUCCESS)
4248 *pTmpRsp = NewRsp;
4249 }
4250
4251 return rcStrict;
4252}
4253
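/**
 * Example (illustrative sketch only): why the *Ex stack helpers take a
 * temporary stack pointer. All accesses go through a local RSP copy so that
 * nothing is committed to the guest context until every access has succeeded.
 * The function name and the two-word frame layout are made up for the example.
 */
#if 0
static VBOXSTRICTRC iemExamplePopFarPtr16(PIEMCPU pIemCpu, uint16_t *puIp, uint16_t *puSel)
{
    PCPUMCTX  pCtx = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pIemCpu, puIp, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pIemCpu, puSel, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u; /* Only commit RSP once both pops have succeeded. */
    return rcStrict;
}
#endif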
4254
4255/**
4256 * Begin a special stack push (used by interrupts, exceptions and such).
4257 *
4258 * This will raise #SS or #PF if appropriate.
4259 *
4260 * @returns Strict VBox status code.
4261 * @param pIemCpu The IEM per CPU data.
4262 * @param cbMem The number of bytes to push onto the stack.
4263 * @param ppvMem Where to return the pointer to the stack memory.
4264 * As with the other memory functions this could be
4265 * direct access or bounce buffered access, so
4266 * don't commit register until the commit call
4267 * succeeds.
4268 * @param puNewRsp Where to return the new RSP value. This must be
4269 * passed unchanged to
4270 * iemMemStackPushCommitSpecial().
4271 */
4272static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
4273{
4274 Assert(cbMem < UINT8_MAX);
4275 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4276 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
4277 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4278}
4279
4280
4281/**
4282 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
4283 *
4284 * This will update the rSP.
4285 *
4286 * @returns Strict VBox status code.
4287 * @param pIemCpu The IEM per CPU data.
4288 * @param pvMem The pointer returned by
4289 * iemMemStackPushBeginSpecial().
4290 * @param uNewRsp The new RSP value returned by
4291 * iemMemStackPushBeginSpecial().
4292 */
4293static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
4294{
4295 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
4296 if (rcStrict == VINF_SUCCESS)
4297 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
4298 return rcStrict;
4299}
4300
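/**
 * Example (illustrative sketch only): the begin/fill/commit sequence the
 * special stack push helpers are designed for. The function name and the
 * two-word frame are made up; the point is that the frame is filled in via
 * the returned mapping and RSP is only committed by the commit call.
 */
#if 0
static VBOXSTRICTRC iemExamplePushRetFrame16(PIEMCPU pIemCpu, uint16_t uSel, uint16_t uIp)
{
    uint16_t    *pu16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 4, (void **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Fill in the frame; the mapping may be direct or bounce buffered. */
    pu16Frame[0] = uIp;
    pu16Frame[1] = uSel;
    return iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
}
#endif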
4301
4302/**
4303 * Begin a special stack pop (used by iret, retf and such).
4304 *
4305 * This will raise #SS or #PF if appropriate.
4306 *
4307 * @returns Strict VBox status code.
4308 * @param pIemCpu The IEM per CPU data.
4309 * @param cbMem The number of bytes to pop off the stack.
4310 * @param ppvMem Where to return the pointer to the stack memory.
4311 * @param puNewRsp Where to return the new RSP value. This must be
4312 * passed unchanged to
4313 * iemMemStackPopCommitSpecial().
4314 */
4315static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
4316{
4317 Assert(cbMem < UINT8_MAX);
4318 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4319 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
4320 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4321}
4322
4323
4324/**
4325 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
4326 *
4327 * This will update the rSP.
4328 *
4329 * @returns Strict VBox status code.
4330 * @param pIemCpu The IEM per CPU data.
4331 * @param pvMem The pointer returned by
4332 * iemMemStackPopBeginSpecial().
4333 * @param uNewRsp The new RSP value returned by
4334 * iemMemStackPopBeginSpecial().
4335 */
4336static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
4337{
4338 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
4339 if (rcStrict == VINF_SUCCESS)
4340 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
4341 return rcStrict;
4342}
4343
4344
4345/**
4346 * Fetches a descriptor table entry.
4347 *
4348 * @returns Strict VBox status code.
4349 * @param pIemCpu The IEM per CPU.
4350 * @param pDesc Where to return the descriptor table entry.
4351 * @param uSel The selector which table entry to fetch.
4352 */
4353static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
4354{
4355 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4356
4357 /** @todo did the 286 require all 8 bytes to be accessible? */
4358 /*
4359 * Get the selector table base and check bounds.
4360 */
4361 RTGCPTR GCPtrBase;
4362 if (uSel & X86_SEL_LDT)
4363 {
4364 if ( !pCtx->ldtrHid.Attr.n.u1Present
4365 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
4366 {
4367 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
4368 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
4369 /** @todo is this the right exception? */
4370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4371 }
4372
4373 Assert(pCtx->ldtrHid.Attr.n.u1Present);
4374 GCPtrBase = pCtx->ldtrHid.u64Base;
4375 }
4376 else
4377 {
4378 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
4379 {
4380 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
4381 /** @todo is this the right exception? */
4382 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4383 }
4384 GCPtrBase = pCtx->gdtr.pGdt;
4385 }
4386
4387 /*
4388 * Read the legacy descriptor and maybe the long mode extensions if
4389 * required.
4390 */
4391 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4392 if (rcStrict == VINF_SUCCESS)
4393 {
4394 if ( !IEM_IS_LONG_MODE(pIemCpu)
4395 || pDesc->Legacy.Gen.u1DescType)
4396 pDesc->Long.au64[1] = 0;
4397 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
4398 rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4399 else
4400 {
4401 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
4402 /** @todo is this the right exception? */
4403 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4404 }
4405 }
4406 return rcStrict;
4407}
4408
4409
4410/**
4411 * Marks the selector descriptor as accessed (only non-system descriptors).
4412 *
4413 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
4414 * will therefore skip the limit checks.
4415 *
4416 * @returns Strict VBox status code.
4417 * @param pIemCpu The IEM per CPU.
4418 * @param uSel The selector.
4419 */
4420static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
4421{
4422 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4423
4424 /*
4425 * Get the selector table base and calculate the entry address.
4426 */
4427 RTGCPTR GCPtr = uSel & X86_SEL_LDT
4428 ? pCtx->ldtrHid.u64Base
4429 : pCtx->gdtr.pGdt;
4430 GCPtr += uSel & X86_SEL_MASK;
4431
4432 /*
4433 * ASMAtomicBitSet will assert if the address is misaligned, so do some
4434 * ugly stuff to avoid this. This will make sure the access is atomic and
4435 * more or less removes any question about 8-bit or 32-bit accesses.
4436 */
4437 VBOXSTRICTRC rcStrict;
4438 uint32_t volatile *pu32;
4439 if ((GCPtr & 3) == 0)
4440 {
4441 /* The normal case, map the 32-bit bits around the accessed bit (40). */
4442 GCPtr += 2 + 2;
4443 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
4444 if (rcStrict != VINF_SUCCESS)
4445 return rcStrict;
4446 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
4447 }
4448 else
4449 {
4450 /* The misaligned GDT/LDT case, map the whole thing. */
4451 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
4452 if (rcStrict != VINF_SUCCESS)
4453 return rcStrict;
4454 switch ((uintptr_t)pu32 & 3)
4455 {
4456 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
4457 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
4458 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
4459 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
4460 }
4461 }
4462
4463 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
4464}
4465
4466/** @} */
4467
4468
4469/*
4470 * Include the C/C++ implementation of the instructions.
4471 */
4472#include "IEMAllCImpl.cpp.h"
4473
4474
4475
4476/** @name "Microcode" macros.
4477 *
4478 * The idea is that we should be able to use the same code to interpret
4479 * instructions as well as to recompile them. Thus this obfuscation.
4480 *
4481 * @{
4482 */
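/*
 * Example (illustrative sketch only): roughly what an instruction body built
 * from these macros looks like once decoded (a register-to-register 16-bit
 * exchange is used here). The function name and signature are made up; the
 * real opcode functions live in the instruction decoder included further down
 * and are considerably more involved.
 */
#if 0
static VBOXSTRICTRC iemOpExample_xchg_R16_R16(PIEMCPU pIemCpu, uint8_t iGRegDst, uint8_t iGRegSrc)
{
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint16_t, u16Dst);
    IEM_MC_LOCAL(uint16_t, u16Src);
    IEM_MC_FETCH_GREG_U16(u16Dst, iGRegDst);
    IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
    IEM_MC_STORE_GREG_U16(iGRegDst, u16Src);
    IEM_MC_STORE_GREG_U16(iGRegSrc, u16Dst);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif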
4483#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
4484#define IEM_MC_END() }
4485#define IEM_MC_PAUSE() do {} while (0)
4486#define IEM_MC_CONTINUE() do {} while (0)
4487
4488/** Internal macro. */
4489#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
4490 do \
4491 { \
4492 VBOXSTRICTRC rcStrict2 = a_Expr; \
4493 if (rcStrict2 != VINF_SUCCESS) \
4494 return rcStrict2; \
4495 } while (0)
4496
4497#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
4498#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
4499#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
4500#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
4501#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
4502#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
4503#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
4504
4505#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
4506#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
4507 do { \
4508 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
4509 return iemRaiseDeviceNotAvailable(pIemCpu); \
4510 } while (0)
4511#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
4512 do { \
4513 if (iemFRegFetchFsw(pIemCpu) & X86_FSW_ES) \
4514 return iemRaiseMathFault(pIemCpu); \
4515 } while (0)
4516#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
4517 do { \
4518 if (pIemCpu->uCpl != 0) \
4519 return iemRaiseGeneralProtectionFault0(pIemCpu); \
4520 } while (0)
4521
4522
4523#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
4524#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
4525#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
4526#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
4527#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
4528#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
4529 uint32_t a_Name; \
4530 uint32_t *a_pName = &a_Name
4531#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
4532 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
4533
4534#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
4535#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
4536
4537#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4538#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4539#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4540#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
4541#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
4542#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
4543#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
4544#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
4545#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
4546#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
4547#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
4548#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
4549#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
4550#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
4551#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
4552#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
4553#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
4554#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
4555#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
4556#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
4557#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
4558#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
4559#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
4560#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
4561#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = iemFRegFetchFsw(pIemCpu)
4562
4563#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
4564#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
4565#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
4566#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
4567#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
4568#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
4569#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
4570#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
4571#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
4572
4573#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
4574#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
4575/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
4576 * commit. */
4577#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
4578#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
4579#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
4580
4581#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
4582#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
4583#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
4584 do { \
4585 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
4586 *pu32Reg += (a_u32Value); \
4587 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
4588 } while (0)
4589#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
4590
4591#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
4592#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
4593#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
4594 do { \
4595 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
4596 *pu32Reg -= (a_u32Value); \
4597 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
4598 } while (0)
4599#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
4600
4601#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
4602#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
4603#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
4604#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
4605#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
4606#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
4607#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
4608
4609#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
4610#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
4611#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
4612
4613#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
4614#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
4615#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
4616
4617#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
4618#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
4619#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
4620
4621#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
4622#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
4623#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
4624
4625
4626#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
4627#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
4628
4629
4630
4631#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
4632 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
4633#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
4634 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
4635#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
4636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
4637
4638#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
4639 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
4640#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
4641 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
4642
4643#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
4644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
4645#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
4646 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
4647
4648#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
4650
4651#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
4653#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
4654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
4655
4656#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
4657 do { \
4658 uint8_t u8Tmp; \
4659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
4660 (a_u16Dst) = u8Tmp; \
4661 } while (0)
4662#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
4663 do { \
4664 uint8_t u8Tmp; \
4665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
4666 (a_u32Dst) = u8Tmp; \
4667 } while (0)
4668#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4669 do { \
4670 uint8_t u8Tmp; \
4671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
4672 (a_u64Dst) = u8Tmp; \
4673 } while (0)
4674#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
4675 do { \
4676 uint16_t u16Tmp; \
4677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
4678 (a_u32Dst) = u16Tmp; \
4679 } while (0)
4680#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4681 do { \
4682 uint16_t u16Tmp; \
4683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
4684 (a_u64Dst) = u16Tmp; \
4685 } while (0)
4686#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4687 do { \
4688 uint32_t u32Tmp; \
4689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
4690 (a_u64Dst) = u32Tmp; \
4691 } while (0)
4692
4693#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
4694 do { \
4695 uint8_t u8Tmp; \
4696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
4697 (a_u16Dst) = (int8_t)u8Tmp; \
4698 } while (0)
4699#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
4700 do { \
4701 uint8_t u8Tmp; \
4702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
4703 (a_u32Dst) = (int8_t)u8Tmp; \
4704 } while (0)
4705#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4706 do { \
4707 uint8_t u8Tmp; \
4708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
4709 (a_u64Dst) = (int8_t)u8Tmp; \
4710 } while (0)
4711#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
4712 do { \
4713 uint16_t u16Tmp; \
4714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
4715 (a_u32Dst) = (int16_t)u16Tmp; \
4716 } while (0)
4717#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4718 do { \
4719 uint16_t u16Tmp; \
4720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
4721 (a_u64Dst) = (int16_t)u16Tmp; \
4722 } while (0)
4723#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
4724 do { \
4725 uint32_t u32Tmp; \
4726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
4727 (a_u64Dst) = (int32_t)u32Tmp; \
4728 } while (0)
4729
4730#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
4731 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
4732#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
4733 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
4734#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
4735 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
4736#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
4737 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
4738
4739#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
4740 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
4741
4742#define IEM_MC_PUSH_U16(a_u16Value) \
4743 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
4744#define IEM_MC_PUSH_U32(a_u32Value) \
4745 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
4746#define IEM_MC_PUSH_U64(a_u64Value) \
4747 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
4748
4749#define IEM_MC_POP_U16(a_pu16Value) \
4750 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
4751#define IEM_MC_POP_U32(a_pu32Value) \
4752 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
4753#define IEM_MC_POP_U64(a_pu64Value) \
4754 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
4755
4756/** Maps guest memory for direct or bounce buffered access.
4757 * The purpose is to pass it to an operand implementation, thus the a_iArg.
4758 * @remarks May return.
4759 */
4760#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
4761 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
4762
4763/** Maps guest memory for direct or bounce buffered access.
4764 * The purpose is to pass it to an operand implementation, thus the a_iArg.
4765 * @remarks May return.
4766 */
4767#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
4768 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
4769
4770/** Commits the memory and unmaps the guest memory.
4771 * @remarks May return.
4772 */
4773#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
4774 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
4775
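/*
 * Example (illustrative sketch only): the map / modify / commit pattern that
 * IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP are meant for, here with a
 * 16-bit read-modify-write memory destination. bRm, pfnWorker and the
 * effective segment in pIemCpu->iEffSeg are assumed to come from the
 * surrounding decoder context; pfnWorker stands for an assembly worker taking
 * (pu16Dst, u16Src, pEFlags).
 */
#if 0
    IEM_MC_BEGIN(3, 2);
    IEM_MC_ARG(uint16_t *, pu16Dst, 0);
    IEM_MC_ARG(uint16_t, u16Src, 1);
    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
    IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
    IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pfnWorker, pu16Dst, u16Src, pEFlags);
    IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif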
4776/** Calculate efficient address from R/M. */
4777#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
4778 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
4779
4780#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
4781#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
4782#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
4783#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
4784
4785/**
4786 * Defers the rest of the instruction emulation to a C implementation routine
4787 * and returns, only taking the standard parameters.
4788 *
4789 * @param a_pfnCImpl The pointer to the C routine.
4790 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
4791 */
4792#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
4793
4794/**
4795 * Defers the rest of instruction emulation to a C implementation routine and
4796 * returns, taking one argument in addition to the standard ones.
4797 *
4798 * @param a_pfnCImpl The pointer to the C routine.
4799 * @param a0 The argument.
4800 */
4801#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
4802
4803/**
4804 * Defers the rest of the instruction emulation to a C implementation routine
4805 * and returns, taking two arguments in addition to the standard ones.
4806 *
4807 * @param a_pfnCImpl The pointer to the C routine.
4808 * @param a0 The first extra argument.
4809 * @param a1 The second extra argument.
4810 */
4811#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
4812
4813/**
4814 * Defers the rest of the instruction emulation to a C implementation routine
4815 * and returns, taking three arguments in addition to the standard ones.
4816 *
4817 * @param a_pfnCImpl The pointer to the C routine.
4818 * @param a0 The first extra argument.
4819 * @param a1 The second extra argument.
4820 * @param a2 The third extra argument.
4821 */
4822#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
4823
4824/**
4825 * Defers the rest of the instruction emulation to a C implementation routine
4826 * and returns, taking five arguments in addition to the standard ones.
4827 *
4828 * @param a_pfnCImpl The pointer to the C routine.
4829 * @param a0 The first extra argument.
4830 * @param a1 The second extra argument.
4831 * @param a2 The third extra argument.
4832 * @param a3 The fourth extra argument.
4833 * @param a4 The fifth extra argument.
4834 */
4835#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
4836
4837/**
4838 * Defers the entire instruction emulation to a C implementation routine and
4839 * returns, only taking the standard parameters.
4840 *
4841 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4842 *
4843 * @param a_pfnCImpl The pointer to the C routine.
4844 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
4845 */
4846#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
4847
4848/**
4849 * Defers the entire instruction emulation to a C implementation routine and
4850 * returns, taking one argument in addition to the standard ones.
4851 *
4852 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4853 *
4854 * @param a_pfnCImpl The pointer to the C routine.
4855 * @param a0 The argument.
4856 */
4857#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
4858
4859/**
4860 * Defers the entire instruction emulation to a C implementation routine and
4861 * returns, taking two arguments in addition to the standard ones.
4862 *
4863 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4864 *
4865 * @param a_pfnCImpl The pointer to the C routine.
4866 * @param a0 The first extra argument.
4867 * @param a1 The second extra argument.
4868 */
4869#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
4870
4871/**
4872 * Defers the entire instruction emulation to a C implementation routine and
4873 * returns, taking three arguments in addition to the standard ones.
4874 *
4875 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
4876 *
4877 * @param a_pfnCImpl The pointer to the C routine.
4878 * @param a0 The first extra argument.
4879 * @param a1 The second extra argument.
4880 * @param a2 The third extra argument.
4881 */
4882#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
4883
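/*
 * Example (illustrative sketch only): how an opcode decoder function typically
 * uses the deferral macros above for an instruction that is implemented
 * entirely in C. Both the opcode function name and iemCImpl_example are
 * made-up names; the real decoder functions live in the instruction tables
 * included elsewhere.
 */
#if 0
static VBOXSTRICTRC iemOpExample_deferred(PIEMCPU pIemCpu)
{
    IEMOP_MNEMONIC("example");
    /* No per-instruction state needed, so hand the whole job to the C implementation. */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
}
#endif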
4884#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
4885#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
4886#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
4887#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
4888#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
4889 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4890 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4891#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
4892 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4893 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4894#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
4895 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
4896 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4897 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4898#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
4899 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
4900 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
4901 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
4902#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
4903#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
4904#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
4905#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
4906 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
4907 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4908#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
4909 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
4910 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4911#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
4912 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
4913 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4914#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
4915 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
4916 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4917#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
4918 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
4919 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4920#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
4921 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
4922 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
4923#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
4924#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
4925#define IEM_MC_ELSE() } else {
4926#define IEM_MC_ENDIF() } do {} while (0)
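/* Informal usage sketch: a conditional-branch style instruction wraps these
   macros in a microcode block roughly as below (i8Imm stands for a displacement
   fetched earlier by the decoder; treat the exact statement names as
   illustrative of the IEM microcode set rather than a verbatim opcode body):

       IEM_MC_BEGIN(0, 0);
       IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
           IEM_MC_REL_JMP_S8(i8Imm);
       } IEM_MC_ELSE() {
           IEM_MC_ADVANCE_RIP();
       } IEM_MC_ENDIF();
       IEM_MC_END();

   Note that the IF/ELSE/ENDIF macros already open and close the C braces; the
   visible braces above merely add a readability scope. */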
4927
4928/** @} */
4929
4930
4931/** @name Opcode Debug Helpers.
4932 * @{
4933 */
4934#ifdef DEBUG
4935# define IEMOP_MNEMONIC(a_szMnemonic) \
4936 Log2(("decode - %04x:%RGv %s%s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
4937 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic))
4938# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
4939 Log2(("decode - %04x:%RGv %s%s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
4940 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps))
4941#else
4942# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
4943# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
4944#endif
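/* With a DEBUG build and Log2 enabled these produce lines along the lines of
   "decode - 0008:fffffff0 lock xadd" (illustrative values), i.e. CS:RIP
   followed by an optional lock-prefix marker and the mnemonic/operands. */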
4945
4946/** @} */
4947
4948
4949/** @name Opcode Helpers.
4950 * @{
4951 */
4952
4953/** The instruction allows no lock prefixing (in this encoding), throw #UD if
4954 * lock prefixed. */
4955#define IEMOP_HLP_NO_LOCK_PREFIX() \
4956 do \
4957 { \
4958 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
4959 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
4960 } while (0)
4961
4962/** The instruction is not available in 64-bit mode, throw #UD if we're in
4963 * 64-bit mode. */
4964#define IEMOP_HLP_NO_64BIT() \
4965 do \
4966 { \
4967 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
4968 return IEMOP_RAISE_INVALID_OPCODE(); \
4969 } while (0)
4970
4971/** The instruction defaults to 64-bit operand size in 64-bit mode. */
4972#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
4973 do \
4974 { \
4975 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
4976 iemRecalEffOpSize64Default(pIemCpu); \
4977 } while (0)
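/* Hedged usage example (the opcode function name is made up): instructions
   such as push/pop, near call/ret and near jumps default to a 64-bit operand
   size in long mode, so their decoder functions typically start like this:

       FNIEMOP_DEF(iemOp_example_push_reg)
       {
           IEMOP_HLP_NO_LOCK_PREFIX();
           IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
           ...
       }
*/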
4978
4979
4980
4981/**
4982 * Calculates the effective address of a ModR/M memory operand.
4983 *
4984 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
4985 *
4986 * @return Strict VBox status code.
4987 * @param pIemCpu The IEM per CPU data.
4988 * @param bRm The ModRM byte.
4989 * @param pGCPtrEff Where to return the effective address.
4990 */
4991static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
4992{
4993 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
4994 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4995#define SET_SS_DEF() \
4996 do \
4997 { \
4998 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
4999 pIemCpu->iEffSeg = X86_SREG_SS; \
5000 } while (0)
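    /* Note: addressing forms based on BP/EBP/RBP (and on [R|E]SP as a SIB base)
       default to the SS segment; SET_SS_DEF() applies that rule below unless a
       segment override prefix has already been decoded. */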
5001
5002/** @todo Check the effective address size crap! */
5003 switch (pIemCpu->enmEffAddrMode)
5004 {
5005 case IEMMODE_16BIT:
5006 {
5007 uint16_t u16EffAddr;
5008
5009 /* Handle the disp16 form with no registers first. */
5010 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5011 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
5012 else
5013 {
5014 /* Get the displacement. */
5015 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5016 {
5017 case 0: u16EffAddr = 0; break;
5018 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
5019 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
5020 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5021 }
5022
5023 /* Add the base and index registers to the disp. */
5024 switch (bRm & X86_MODRM_RM_MASK)
5025 {
5026 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5027 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5028 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5029 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5030 case 4: u16EffAddr += pCtx->si; break;
5031 case 5: u16EffAddr += pCtx->di; break;
5032 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5033 case 7: u16EffAddr += pCtx->bx; break;
5034 }
5035 }
5036
5037 *pGCPtrEff = u16EffAddr;
5038 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5039 return VINF_SUCCESS;
5040 }
5041
5042 case IEMMODE_32BIT:
5043 {
5044 uint32_t u32EffAddr;
5045
5046 /* Handle the disp32 form with no registers first. */
5047 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5048 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
5049 else
5050 {
5051 /* Get the register (or SIB) value. */
5052 switch ((bRm & X86_MODRM_RM_MASK))
5053 {
5054 case 0: u32EffAddr = pCtx->eax; break;
5055 case 1: u32EffAddr = pCtx->ecx; break;
5056 case 2: u32EffAddr = pCtx->edx; break;
5057 case 3: u32EffAddr = pCtx->ebx; break;
5058 case 4: /* SIB */
5059 {
5060 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5061
5062 /* Get the index and scale it. */
5063 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5064 {
5065 case 0: u32EffAddr = pCtx->eax; break;
5066 case 1: u32EffAddr = pCtx->ecx; break;
5067 case 2: u32EffAddr = pCtx->edx; break;
5068 case 3: u32EffAddr = pCtx->ebx; break;
5069 case 4: u32EffAddr = 0; /*none */ break;
5070 case 5: u32EffAddr = pCtx->ebp; break;
5071 case 6: u32EffAddr = pCtx->esi; break;
5072 case 7: u32EffAddr = pCtx->edi; break;
5073 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5074 }
5075 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5076
5077 /* add base */
5078 switch (bSib & X86_SIB_BASE_MASK)
5079 {
5080 case 0: u32EffAddr += pCtx->eax; break;
5081 case 1: u32EffAddr += pCtx->ecx; break;
5082 case 2: u32EffAddr += pCtx->edx; break;
5083 case 3: u32EffAddr += pCtx->ebx; break;
5084 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5085 case 5:
5086 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5087 {
5088 u32EffAddr += pCtx->ebp;
5089 SET_SS_DEF();
5090 }
5091 else
5092 {
5093 uint32_t u32Disp;
5094 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5095 u32EffAddr += u32Disp;
5096 }
5097 break;
5098 case 6: u32EffAddr += pCtx->esi; break;
5099 case 7: u32EffAddr += pCtx->edi; break;
5100 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5101 }
5102 break;
5103 }
5104 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5105 case 6: u32EffAddr = pCtx->esi; break;
5106 case 7: u32EffAddr = pCtx->edi; break;
5107 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5108 }
5109
5110 /* Get and add the displacement. */
5111 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5112 {
5113 case 0:
5114 break;
5115 case 1:
5116 {
5117 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
5118 u32EffAddr += i8Disp;
5119 break;
5120 }
5121 case 2:
5122 {
5123 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5124 u32EffAddr += u32Disp;
5125 break;
5126 }
5127 default:
5128 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5129 }
5130
5131 }
5132 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
5133 *pGCPtrEff = u32EffAddr;
5134 else
5135 {
5136 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
5137 *pGCPtrEff = u32EffAddr & UINT16_MAX;
5138 }
5139 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5140 return VINF_SUCCESS;
5141 }
5142
5143 case IEMMODE_64BIT:
5144 {
5145 uint64_t u64EffAddr;
5146
5147 /* Handle the rip+disp32 form with no registers first. */
5148 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5149 {
5150 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
5151 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
5152 }
5153 else
5154 {
5155 /* Get the register (or SIB) value. */
5156 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
5157 {
5158 case 0: u64EffAddr = pCtx->rax; break;
5159 case 1: u64EffAddr = pCtx->rcx; break;
5160 case 2: u64EffAddr = pCtx->rdx; break;
5161 case 3: u64EffAddr = pCtx->rbx; break;
5162 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
5163 case 6: u64EffAddr = pCtx->rsi; break;
5164 case 7: u64EffAddr = pCtx->rdi; break;
5165 case 8: u64EffAddr = pCtx->r8; break;
5166 case 9: u64EffAddr = pCtx->r9; break;
5167 case 10: u64EffAddr = pCtx->r10; break;
5168 case 11: u64EffAddr = pCtx->r11; break;
5169 case 13: u64EffAddr = pCtx->r13; break;
5170 case 14: u64EffAddr = pCtx->r14; break;
5171 case 15: u64EffAddr = pCtx->r15; break;
5172 /* SIB */
5173 case 4:
5174 case 12:
5175 {
5176 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5177
5178 /* Get the index and scale it. */
5179 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
5180 {
5181 case 0: u64EffAddr = pCtx->rax; break;
5182 case 1: u64EffAddr = pCtx->rcx; break;
5183 case 2: u64EffAddr = pCtx->rdx; break;
5184 case 3: u64EffAddr = pCtx->rbx; break;
5185 case 4: u64EffAddr = 0; /*none */ break;
5186 case 5: u64EffAddr = pCtx->rbp; break;
5187 case 6: u64EffAddr = pCtx->rsi; break;
5188 case 7: u64EffAddr = pCtx->rdi; break;
5189 case 8: u64EffAddr = pCtx->r8; break;
5190 case 9: u64EffAddr = pCtx->r9; break;
5191 case 10: u64EffAddr = pCtx->r10; break;
5192 case 11: u64EffAddr = pCtx->r11; break;
5193 case 12: u64EffAddr = pCtx->r12; break;
5194 case 13: u64EffAddr = pCtx->r13; break;
5195 case 14: u64EffAddr = pCtx->r14; break;
5196 case 15: u64EffAddr = pCtx->r15; break;
5197 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5198 }
5199 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5200
5201 /* add base */
5202 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
5203 {
5204 case 0: u64EffAddr += pCtx->rax; break;
5205 case 1: u64EffAddr += pCtx->rcx; break;
5206 case 2: u64EffAddr += pCtx->rdx; break;
5207 case 3: u64EffAddr += pCtx->rbx; break;
5208 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
5209 case 6: u64EffAddr += pCtx->rsi; break;
5210 case 7: u64EffAddr += pCtx->rdi; break;
5211 case 8: u64EffAddr += pCtx->r8; break;
5212 case 9: u64EffAddr += pCtx->r9; break;
5213 case 10: u64EffAddr += pCtx->r10; break;
5214 case 11: u64EffAddr += pCtx->r11; break;
 case 12: u64EffAddr += pCtx->r12; break;
5215 case 14: u64EffAddr += pCtx->r14; break;
5216 case 15: u64EffAddr += pCtx->r15; break;
5217 /* complicated encodings */
5218 case 5:
5219 case 13:
5220 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5221 {
5222 if (!pIemCpu->uRexB)
5223 {
5224 u64EffAddr += pCtx->rbp;
5225 SET_SS_DEF();
5226 }
5227 else
5228 u64EffAddr += pCtx->r13;
5229 }
5230 else
5231 {
5232 uint32_t u32Disp;
5233 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5234 u64EffAddr += (int32_t)u32Disp;
5235 }
5236 break;
5237 }
5238 break;
5239 }
5240 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5241 }
5242
5243 /* Get and add the displacement. */
5244 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5245 {
5246 case 0:
5247 break;
5248 case 1:
5249 {
5250 int8_t i8Disp;
5251 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
5252 u64EffAddr += i8Disp;
5253 break;
5254 }
5255 case 2:
5256 {
5257 uint32_t u32Disp;
5258 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5259 u64EffAddr += (int32_t)u32Disp;
5260 break;
5261 }
5262 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
5263 }
5264
5265 }
5266 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
5267 *pGCPtrEff = u64EffAddr;
5268 else
5269 *pGCPtrEff = u64EffAddr & UINT16_MAX;
5270 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
5271 return VINF_SUCCESS;
5272 }
5273 }
5274
5275 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5276}
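/* Informal worked example for the 32-bit path above: bRm=0x44 encodes mod=01,
   rm=100, so a SIB byte and a disp8 follow. With bSib=0x98 (scale=2, index=EBX,
   base=EAX) and disp8=0x10 the routine returns
   GCPtrEff = eax + (ebx << 2) + 0x10, and DS remains the default segment since
   neither EBP nor ESP is involved. */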
5277
5278/** @} */
5279
5280
5281
5282/*
5283 * Include the instructions
5284 */
5285#include "IEMAllInstructions.cpp.h"
5286
5287
5288
5289
5290#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
5291
5292/**
5293 * Sets up execution verification mode.
5294 */
5295static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
5296{
5297 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
5298 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
5299 pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */
5300
5301#if 0
5302 // Auto enable; DSL.
5303 if ( pIemCpu->fNoRem
5304 && pOrgCtx->cs == 0x10
5305 && ( pOrgCtx->rip == 0x00100fc7
5306 || pOrgCtx->rip == 0x00100ffc
5307 || pOrgCtx->rip == 0x00100ffe
5308 )
5309 )
5310 {
5311 RTLogFlags(NULL, "enabled");
5312 pIemCpu->fNoRem = false;
5313 }
5314#endif
5315#if 0 /* auto enable on first paged protected mode interrupt */
5316 if ( pIemCpu->fNoRem
5317 && pOrgCtx->eflags.Bits.u1IF
5318 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
5319 && TRPMHasTrap(pVCpu)
5320 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
5321 {
5322 RTLogFlags(NULL, "enabled");
5323 pIemCpu->fNoRem = false;
5324 }
5325#endif
5326
5327 /*
5328 * Switch state.
5329 */
5330 if (IEM_VERIFICATION_ENABLED(pIemCpu))
5331 {
5332 static CPUMCTX s_DebugCtx; /* Ugly! */
5333
5334 s_DebugCtx = *pOrgCtx;
5335 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
5336 }
5337
5338 /*
5339 * See if there is an interrupt pending in TRPM and inject it if we can.
5340 */
5341 if ( pOrgCtx->eflags.Bits.u1IF
5342 && TRPMHasTrap(pVCpu)
5343 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
5344 {
5345 uint8_t u8TrapNo;
5346 TRPMEVENT enmType;
5347 RTGCUINT uErrCode;
5348 RTGCPTR uCr2;
5349 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
5350 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
5351 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5352 TRPMResetTrap(pVCpu);
5353 }
5354
5355 /*
5356 * Reset the counters.
5357 */
5358 pIemCpu->cIOReads = 0;
5359 pIemCpu->cIOWrites = 0;
5360 pIemCpu->fUndefinedEFlags = 0;
5361
5362 if (IEM_VERIFICATION_ENABLED(pIemCpu))
5363 {
5364 /*
5365 * Free all verification records.
5366 */
5367 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
5368 pIemCpu->pIemEvtRecHead = NULL;
5369 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
5370 do
5371 {
5372 while (pEvtRec)
5373 {
5374 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
5375 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
5376 pIemCpu->pFreeEvtRec = pEvtRec;
5377 pEvtRec = pNext;
5378 }
5379 pEvtRec = pIemCpu->pOtherEvtRecHead;
5380 pIemCpu->pOtherEvtRecHead = NULL;
5381 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
5382 } while (pEvtRec);
5383 }
5384}
5385
5386
5387/**
5388 * Allocate an event record.
5389 * @returns Pointer to a record.
5390 */
5391static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
5392{
5393 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5394 return NULL;
5395
5396 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
5397 if (pEvtRec)
5398 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
5399 else
5400 {
5401 if (!pIemCpu->ppIemEvtRecNext)
5402 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
5403
5404 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
5405 if (!pEvtRec)
5406 return NULL;
5407 }
5408 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
5409 pEvtRec->pNext = NULL;
5410 return pEvtRec;
5411}
5412
5413
5414/**
5415 * IOMMMIORead notification.
5416 */
5417VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
5418{
5419 PVMCPU pVCpu = VMMGetCpu(pVM);
5420 if (!pVCpu)
5421 return;
5422 PIEMCPU pIemCpu = &pVCpu->iem.s;
5423 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5424 if (!pEvtRec)
5425 return;
5426 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5427 pEvtRec->u.RamRead.GCPhys = GCPhys;
5428 pEvtRec->u.RamRead.cb = cbValue;
5429 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5430 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5431}
5432
5433
5434/**
5435 * IOMMMIOWrite notification.
5436 */
5437VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
5438{
5439 PVMCPU pVCpu = VMMGetCpu(pVM);
5440 if (!pVCpu)
5441 return;
5442 PIEMCPU pIemCpu = &pVCpu->iem.s;
5443 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5444 if (!pEvtRec)
5445 return;
5446 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5447 pEvtRec->u.RamWrite.GCPhys = GCPhys;
5448 pEvtRec->u.RamWrite.cb = cbValue;
5449 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
5450 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
5451 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
5452 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
5453 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5454 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5455}
5456
5457
5458/**
5459 * IOMIOPortRead notification.
5460 */
5461VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
5462{
5463 PVMCPU pVCpu = VMMGetCpu(pVM);
5464 if (!pVCpu)
5465 return;
5466 PIEMCPU pIemCpu = &pVCpu->iem.s;
5467 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5468 if (!pEvtRec)
5469 return;
5470 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
5471 pEvtRec->u.IOPortRead.Port = Port;
5472 pEvtRec->u.IOPortRead.cbValue = cbValue;
5473 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5474 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5475}
5476
5477/**
5478 * IOMIOPortWrite notification.
5479 */
5480VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
5481{
5482 PVMCPU pVCpu = VMMGetCpu(pVM);
5483 if (!pVCpu)
5484 return;
5485 PIEMCPU pIemCpu = &pVCpu->iem.s;
5486 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5487 if (!pEvtRec)
5488 return;
5489 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
5490 pEvtRec->u.IOPortWrite.Port = Port;
5491 pEvtRec->u.IOPortWrite.cbValue = cbValue;
5492 pEvtRec->u.IOPortWrite.u32Value = u32Value;
5493 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
5494 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
5495}
5496
5497
5498VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
5499{
5500 AssertFailed();
5501}
5502
5503
5504VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
5505{
5506 AssertFailed();
5507}
5508
5509
5510/**
5511 * Fakes and records an I/O port read.
5512 *
5513 * @returns VINF_SUCCESS.
5514 * @param pIemCpu The IEM per CPU data.
5515 * @param Port The I/O port.
5516 * @param pu32Value Where to store the fake value.
5517 * @param cbValue The size of the access.
5518 */
5519static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
5520{
5521 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5522 if (pEvtRec)
5523 {
5524 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
5525 pEvtRec->u.IOPortRead.Port = Port;
5526 pEvtRec->u.IOPortRead.cbValue = cbValue;
5527 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5528 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5529 }
5530 pIemCpu->cIOReads++;
5531 *pu32Value = 0xffffffff;
5532 return VINF_SUCCESS;
5533}
5534
5535
5536/**
5537 * Fakes and records an I/O port write.
5538 *
5539 * @returns VINF_SUCCESS.
5540 * @param pIemCpu The IEM per CPU data.
5541 * @param Port The I/O port.
5542 * @param u32Value The value being written.
5543 * @param cbValue The size of the access.
5544 */
5545static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
5546{
5547 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5548 if (pEvtRec)
5549 {
5550 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
5551 pEvtRec->u.IOPortWrite.Port = Port;
5552 pEvtRec->u.IOPortWrite.cbValue = cbValue;
5553 pEvtRec->u.IOPortWrite.u32Value = u32Value;
5554 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5555 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5556 }
5557 pIemCpu->cIOWrites++;
5558 return VINF_SUCCESS;
5559}
5560
5561
5562/**
5563 * Used to add extra CPU state details to an assertion message.
5564 * @param pIemCpu The IEM per CPU state.
5565 */
5566static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
5567{
5568 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5569 PVM pVM = IEMCPU_TO_VM(pIemCpu);
5570 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
5571 char szRegs[4096];
5572 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5573 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5574 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5575 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5576 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5577 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5578 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5579 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5580 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5581 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5582 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5583 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5584 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5585 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5586 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5587 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5588 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5589 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5590 " efer=%016VR{efer}\n"
5591 " pat=%016VR{pat}\n"
5592 " sf_mask=%016VR{sf_mask}\n"
5593 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5594 " lstar=%016VR{lstar}\n"
5595 " star=%016VR{star} cstar=%016VR{cstar}\n"
5596 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5597 );
5598
5599 char szInstr1[256];
5600 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
5601 DBGF_DISAS_FLAGS_DEFAULT_MODE,
5602 szInstr1, sizeof(szInstr1), NULL);
5603 char szInstr2[256];
5604 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
5605 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5606 szInstr2, sizeof(szInstr2), NULL);
5607
5608 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
5609}
5610
5611
5612/**
5613 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
5614 * dump to the assertion info.
5615 *
5616 * @param pEvtRec The record to dump.
5617 */
5618static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
5619{
5620 switch (pEvtRec->enmEvent)
5621 {
5622 case IEMVERIFYEVENT_IOPORT_READ:
5623 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
5624 pEvtRec->u.IOPortRead.Port,
5625 pEvtRec->u.IOPortRead.cbValue);
5626 break;
5627 case IEMVERIFYEVENT_IOPORT_WRITE:
5628 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
5629 pEvtRec->u.IOPortWrite.Port,
5630 pEvtRec->u.IOPortWrite.cbValue,
5631 pEvtRec->u.IOPortWrite.u32Value);
5632 break;
5633 case IEMVERIFYEVENT_RAM_READ:
5634 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
5635 pEvtRec->u.RamRead.GCPhys,
5636 pEvtRec->u.RamRead.cb);
5637 break;
5638 case IEMVERIFYEVENT_RAM_WRITE:
5639 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
5640 pEvtRec->u.RamWrite.GCPhys,
5641 pEvtRec->u.RamWrite.cb,
5642 (int)pEvtRec->u.RamWrite.cb,
5643 pEvtRec->u.RamWrite.ab);
5644 break;
5645 default:
5646 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
5647 break;
5648 }
5649}
5650
5651
5652/**
5653 * Raises an assertion on the specified records, showing the given message with
5654 * a record dump attached.
5655 *
5656 * @param pIemCpu The IEM per CPU data.
5657 * @param pEvtRec1 The first record.
5658 * @param pEvtRec2 The second record.
5659 * @param pszMsg The message explaining why we're asserting.
5660 */
5661static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
5662{
5663 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
5664 iemVerifyAssertAddRecordDump(pEvtRec1);
5665 iemVerifyAssertAddRecordDump(pEvtRec2);
5666 iemVerifyAssertMsg2(pIemCpu);
5667 RTAssertPanic();
5668}
5669
5670
5671/**
5672 * Raises an assertion on the specified record, showing the given message with
5673 * a record dump attached.
5674 *
5675 * @param pIemCpu The IEM per CPU data.
5676 * @param pEvtRec1 The first record.
5677 * @param pszMsg The message explaining why we're asserting.
5678 */
5679static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
5680{
5681 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
5682 iemVerifyAssertAddRecordDump(pEvtRec);
5683 iemVerifyAssertMsg2(pIemCpu);
5684 RTAssertPanic();
5685}
5686
5687
5688/**
5689 * Verifies a write record.
5690 *
5691 * @param pIemCpu The IEM per CPU data.
5692 * @param pEvtRec The write record.
5693 */
5694static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
5695{
5696 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
5697 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
5698 if ( RT_FAILURE(rc)
5699 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
5700 {
5701 /* fend off ins */
5702 if ( !pIemCpu->cIOReads
5703 || pEvtRec->u.RamWrite.ab[0] != 0xcc
5704 || ( pEvtRec->u.RamWrite.cb != 1
5705 && pEvtRec->u.RamWrite.cb != 2
5706 && pEvtRec->u.RamWrite.cb != 4) )
5707 {
5708 /* fend off ROMs */
5709 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
5710 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
5711 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
5712 {
5713 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
5714 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
5715 RTAssertMsg2Add("REM: %.*Rhxs\n"
5716 "IEM: %.*Rhxs\n",
5717 pEvtRec->u.RamWrite.cb, abBuf,
5718 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
5719 iemVerifyAssertAddRecordDump(pEvtRec);
5720 iemVerifyAssertMsg2(pIemCpu);
5721 RTAssertPanic();
5722 }
5723 }
5724 }
5725
5726}
5727
5728/**
5729 * Performs the post-execution verification checks.
5730 */
5731static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
5732{
5733 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5734 return;
5735
5736 /*
5737 * Switch back the state.
5738 */
5739 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5740 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
5741 Assert(pOrgCtx != pDebugCtx);
5742 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
5743
5744 /*
5745 * Execute the instruction in REM.
5746 */
5747 int rc = REMR3EmulateInstruction(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu));
5748 AssertRC(rc);
5749
5750 /*
5751 * Compare the register states.
5752 */
5753 unsigned cDiffs = 0;
5754 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
5755 {
5756 Log(("REM and IEM end up with different registers!\n"));
5757
5758# define CHECK_FIELD(a_Field) \
5759 do \
5760 { \
5761 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
5762 { \
5763 switch (sizeof(pOrgCtx->a_Field)) \
5764 { \
5765 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5766 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5767 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5768 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
5769 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
5770 } \
5771 cDiffs++; \
5772 } \
5773 } while (0)
5774
5775# define CHECK_BIT_FIELD(a_Field) \
5776 do \
5777 { \
5778 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
5779 { \
5780 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
5781 cDiffs++; \
5782 } \
5783 } while (0)
5784
5785# define CHECK_SEL(a_Sel) \
5786 do \
5787 { \
5788 CHECK_FIELD(a_Sel); \
5789 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
5790 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
5791 { \
5792 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
5793 cDiffs++; \
5794 } \
5795 CHECK_FIELD(a_Sel##Hid.u64Base); \
5796 CHECK_FIELD(a_Sel##Hid.u32Limit); \
5797 } while (0)
5798
5799 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
5800 {
5801 RTAssertMsg2Weak(" the FPU state differs\n");
5802 cDiffs++;
5803 CHECK_FIELD(fpu.FCW);
5804 CHECK_FIELD(fpu.FSW);
5805 CHECK_FIELD(fpu.FTW);
5806 CHECK_FIELD(fpu.FOP);
5807 CHECK_FIELD(fpu.FPUIP);
5808 CHECK_FIELD(fpu.CS);
5809 CHECK_FIELD(fpu.Rsrvd1);
5810 CHECK_FIELD(fpu.FPUDP);
5811 CHECK_FIELD(fpu.DS);
5812 CHECK_FIELD(fpu.Rsrvd2);
5813 CHECK_FIELD(fpu.MXCSR);
5814 CHECK_FIELD(fpu.MXCSR_MASK);
5815 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
5816 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
5817 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
5818 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
5819 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
5820 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
5821 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
5822 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
5823 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
5824 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
5825 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
5826 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
5827 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
5828 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
5829 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
5830 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
5831 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
5832 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
5833 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
5834 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
5835 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
5836 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
5837 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
5838 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
5839 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
5840 CHECK_FIELD(fpu.au32RsrvdRest[i]);
5841 }
5842 CHECK_FIELD(rip);
5843 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
5844 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
5845 {
5846 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
5847 CHECK_BIT_FIELD(rflags.Bits.u1CF);
5848 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
5849 CHECK_BIT_FIELD(rflags.Bits.u1PF);
5850 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
5851 CHECK_BIT_FIELD(rflags.Bits.u1AF);
5852 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
5853 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
5854 CHECK_BIT_FIELD(rflags.Bits.u1SF);
5855 CHECK_BIT_FIELD(rflags.Bits.u1TF);
5856 CHECK_BIT_FIELD(rflags.Bits.u1IF);
5857 CHECK_BIT_FIELD(rflags.Bits.u1DF);
5858 CHECK_BIT_FIELD(rflags.Bits.u1OF);
5859 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
5860 CHECK_BIT_FIELD(rflags.Bits.u1NT);
5861 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
5862 CHECK_BIT_FIELD(rflags.Bits.u1RF);
5863 CHECK_BIT_FIELD(rflags.Bits.u1VM);
5864 CHECK_BIT_FIELD(rflags.Bits.u1AC);
5865 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
5866 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
5867 CHECK_BIT_FIELD(rflags.Bits.u1ID);
5868 }
5869
5870 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
5871 CHECK_FIELD(rax);
5872 CHECK_FIELD(rcx);
5873 if (!pIemCpu->fIgnoreRaxRdx)
5874 CHECK_FIELD(rdx);
5875 CHECK_FIELD(rbx);
5876 CHECK_FIELD(rsp);
5877 CHECK_FIELD(rbp);
5878 CHECK_FIELD(rsi);
5879 CHECK_FIELD(rdi);
5880 CHECK_FIELD(r8);
5881 CHECK_FIELD(r9);
5882 CHECK_FIELD(r10);
5883 CHECK_FIELD(r11);
5884 CHECK_FIELD(r12);
5885 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
5886 CHECK_SEL(cs);
5887 CHECK_SEL(ss);
5888 CHECK_SEL(ds);
5889 CHECK_SEL(es);
5890 CHECK_SEL(fs);
5891 CHECK_SEL(gs);
5892 CHECK_FIELD(cr0);
5893 CHECK_FIELD(cr2);
5894 CHECK_FIELD(cr3);
5895 CHECK_FIELD(cr4);
5896 CHECK_FIELD(dr[0]);
5897 CHECK_FIELD(dr[1]);
5898 CHECK_FIELD(dr[2]);
5899 CHECK_FIELD(dr[3]);
5900 CHECK_FIELD(dr[6]);
5901 CHECK_FIELD(dr[7]);
5902 CHECK_FIELD(gdtr.cbGdt);
5903 CHECK_FIELD(gdtr.pGdt);
5904 CHECK_FIELD(idtr.cbIdt);
5905 CHECK_FIELD(idtr.pIdt);
5906 CHECK_FIELD(ldtr);
5907 CHECK_FIELD(ldtrHid.u64Base);
5908 CHECK_FIELD(ldtrHid.u32Limit);
5909 CHECK_FIELD(ldtrHid.Attr.u);
5910 CHECK_FIELD(tr);
5911 CHECK_FIELD(trHid.u64Base);
5912 CHECK_FIELD(trHid.u32Limit);
5913 CHECK_FIELD(trHid.Attr.u);
5914 CHECK_FIELD(SysEnter.cs);
5915 CHECK_FIELD(SysEnter.eip);
5916 CHECK_FIELD(SysEnter.esp);
5917 CHECK_FIELD(msrEFER);
5918 CHECK_FIELD(msrSTAR);
5919 CHECK_FIELD(msrPAT);
5920 CHECK_FIELD(msrLSTAR);
5921 CHECK_FIELD(msrCSTAR);
5922 CHECK_FIELD(msrSFMASK);
5923 CHECK_FIELD(msrKERNELGSBASE);
5924
5925 if (cDiffs != 0)
5926 {
5927 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
5928 iemVerifyAssertMsg2(pIemCpu);
5929 RTAssertPanic();
5930 }
5931# undef CHECK_FIELD
5932# undef CHECK_BIT_FIELD
5933 }
5934
5935 /*
5936 * If the register state compared fine, check the verification event
5937 * records.
5938 */
5939 if (cDiffs == 0)
5940 {
5941 /*
5942 * Compare verification event records.
5943 * - I/O port accesses should be a 1:1 match.
5944 */
5945 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
5946 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
5947 while (pIemRec && pOtherRec)
5948 {
5949 /* Since we might miss RAM writes and reads on the other side, skip the
5950 extra IEM RAM records here, but verify any skipped writes against guest memory. */
5951 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
5952 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
5953 && pIemRec->pNext)
5954 {
5955 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
5956 iemVerifyWriteRecord(pIemCpu, pIemRec);
5957 pIemRec = pIemRec->pNext;
5958 }
5959
5960 /* Do the compare. */
5961 if (pIemRec->enmEvent != pOtherRec->enmEvent)
5962 {
5963 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
5964 break;
5965 }
5966 bool fEquals;
5967 switch (pIemRec->enmEvent)
5968 {
5969 case IEMVERIFYEVENT_IOPORT_READ:
5970 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
5971 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
5972 break;
5973 case IEMVERIFYEVENT_IOPORT_WRITE:
5974 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
5975 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
5976 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
5977 break;
5978 case IEMVERIFYEVENT_RAM_READ:
5979 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
5980 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
5981 break;
5982 case IEMVERIFYEVENT_RAM_WRITE:
5983 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
5984 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
5985 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
5986 break;
5987 default:
5988 fEquals = false;
5989 break;
5990 }
5991 if (!fEquals)
5992 {
5993 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
5994 break;
5995 }
5996
5997 /* advance */
5998 pIemRec = pIemRec->pNext;
5999 pOtherRec = pOtherRec->pNext;
6000 }
6001
6002 /* Ignore extra writes and reads. */
6003 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
6004 {
6005 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6006 iemVerifyWriteRecord(pIemCpu, pIemRec);
6007 pIemRec = pIemRec->pNext;
6008 }
6009 if (pIemRec != NULL)
6010 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
6011 else if (pOtherRec != NULL)
6012 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
6013 }
6014 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6015
6016 /*
6017 * HACK ALERT! You don't normally want to verify a whole boot sequence.
6018 */
6019 if (pIemCpu->cInstructions == 1)
6020 RTLogFlags(NULL, "disabled");
6021}
6022
6023#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6024
6025/* stubs */
6026static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6027{
6028 return VERR_INTERNAL_ERROR;
6029}
6030
6031static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6032{
6033 return VERR_INTERNAL_ERROR;
6034}
6035
6036#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6037
6038
6039/**
6040 * Execute one instruction.
6041 *
6042 * @return Strict VBox status code.
6043 * @param pVCpu The current virtual CPU.
6044 */
6045VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6046{
6047 PIEMCPU pIemCpu = &pVCpu->iem.s;
6048
6049#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6050 iemExecVerificationModeSetup(pIemCpu);
6051#endif
6052#ifdef LOG_ENABLED
6053 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6054 if (LogIs2Enabled())
6055 {
6056 char szInstr[256];
6057 uint32_t cbInstr = 0;
6058 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6059 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6060 szInstr, sizeof(szInstr), &cbInstr);
6061
6062 Log2(("**** "
6063 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6064 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6065 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6066 " %s\n"
6067 ,
6068 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6069 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6070 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6071 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6072 szInstr));
6073 }
6074#endif
6075
6076 /*
6077 * Do the decoding and emulation.
6078 */
6079 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6080 if (rcStrict != VINF_SUCCESS)
6081 return rcStrict;
6082
6083 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
6084 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6085 if (rcStrict == VINF_SUCCESS)
6086 pIemCpu->cInstructions++;
6087//#ifdef DEBUG
6088// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
6089//#endif
6090
6091 /* Execute the next instruction as well if a cli, pop ss or
6092 mov ss, Gr has just completed successfully. */
6093 if ( rcStrict == VINF_SUCCESS
6094 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6095 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
6096 {
6097 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6098 if (rcStrict == VINF_SUCCESS)
6099 {
6100 IEM_OPCODE_GET_NEXT_U8(&b);
6101 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
6102 if (rcStrict == VINF_SUCCESS)
6103 pIemCpu->cInstructions++;
6104 }
6105 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
6106 }
6107
6108 /*
6109 * Assert some sanity.
6110 */
6111#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6112 iemExecVerificationModeCheck(pIemCpu);
6113#endif
6114 return rcStrict;
6115}
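/* Minimal usage sketch, assuming a ring-3 caller that already has the VMCPU
   handle (error handling elided):

       VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
       if (rcStrict != VINF_SUCCESS)
           Log(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

   A caller like EM would translate the strict status code into its own
   scheduling decisions rather than just logging it. */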
6116
6117
6118/**
6119 * Injects a trap, fault, abort, software interrupt or external interrupt.
6120 *
6121 * The parameter list matches TRPMQueryTrapAll pretty closely.
6122 *
6123 * @returns Strict VBox status code.
6124 * @param pVCpu The current virtual CPU.
6125 * @param u8TrapNo The trap number.
6126 * @param enmType What type is it (trap/fault/abort), software
6127 * interrupt or hardware interrupt.
6128 * @param uErrCode The error code if applicable.
6129 * @param uCr2 The CR2 value if applicable.
6130 */
6131VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
6132{
6133 uint32_t fFlags;
6134 switch (enmType)
6135 {
6136 case TRPM_HARDWARE_INT:
6137 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
6138 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6139 uErrCode = uCr2 = 0;
6140 break;
6141
6142 case TRPM_SOFTWARE_INT:
6143 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
6144 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6145 uErrCode = uCr2 = 0;
6146 break;
6147
6148 case TRPM_TRAP:
6149 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
6150 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6151 if (u8TrapNo == X86_XCPT_PF)
6152 fFlags |= IEM_XCPT_FLAGS_CR2;
6153 switch (u8TrapNo)
6154 {
6155 case X86_XCPT_DF:
6156 case X86_XCPT_TS:
6157 case X86_XCPT_NP:
6158 case X86_XCPT_SS:
6159 case X86_XCPT_PF:
6160 case X86_XCPT_AC:
6161 fFlags |= IEM_XCPT_FLAGS_ERR;
6162 break;
6163 }
6164 break;
6165
6166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6167 }
6168
6169 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
6170}
6171