VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@40143

Last change on this file since 40143 was 40143, checked in by vboxsync, 13 years ago

fdiv - almost there...

1/* $Id: IEMAll.cpp 40143 2012-02-16 10:08:06Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 *
53 */
54
55/*******************************************************************************
56* Header Files *
57*******************************************************************************/
58#define LOG_GROUP LOG_GROUP_IEM
59#include <VBox/vmm/iem.h>
60#include <VBox/vmm/pgm.h>
61#include <VBox/vmm/iom.h>
62#include <VBox/vmm/em.h>
63#include <VBox/vmm/tm.h>
64#include <VBox/vmm/dbgf.h>
65#ifdef IEM_VERIFICATION_MODE
66# include <VBox/vmm/rem.h>
67# include <VBox/vmm/mm.h>
68#endif
69#include "IEMInternal.h"
70#include <VBox/vmm/vm.h>
71#include <VBox/log.h>
72#include <VBox/err.h>
73#include <VBox/param.h>
74#include <iprt/assert.h>
75#include <iprt/string.h>
76#include <iprt/x86.h>
77
78
79/*******************************************************************************
80* Structures and Typedefs *
81*******************************************************************************/
82/** @typedef PFNIEMOP
83 * Pointer to an opcode decoder function.
84 */
85
86/** @def FNIEMOP_DEF
87 * Define an opcode decoder function.
88 *
89 * We're using macros for this so that adding and removing parameters as well as
90 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
91 *
92 * @param a_Name The function name.
93 */
94
95
96#if defined(__GNUC__) && defined(RT_ARCH_X86)
97typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
98# define FNIEMOP_DEF(a_Name) \
99 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
100# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
101 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
102# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
103 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
104
105#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
106typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
107# define FNIEMOP_DEF(a_Name) \
108 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
109# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
110 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
111# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
112 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
113
114#elif defined(__GNUC__)
115typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
116# define FNIEMOP_DEF(a_Name) \
117 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
118# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
119 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
120# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
121 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
122
123#else
124typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
125# define FNIEMOP_DEF(a_Name) \
126 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
127# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
128 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
129# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
130 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
131
132#endif
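/* Usage sketch added for illustration (not part of the original file): a
 * decoder function is declared through FNIEMOP_DEF so the calling convention
 * and parameter list live in one place, and it is invoked through the
 * FNIEMOP_CALL macro defined further down. The stub below is made up. */
#if 0
FNIEMOP_DEF(iemOp_ExampleStub)
{
    /* A real decoder would fetch the remaining opcode bytes and emulate the
       instruction; this stub just reports success. */
    return VINF_SUCCESS;
}
/* At the dispatch site: VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_ExampleStub); */
#endif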
133
134
135/**
136 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
137 */
138typedef union IEMSELDESC
139{
140 /** The legacy view. */
141 X86DESC Legacy;
142 /** The long mode view. */
143 X86DESC64 Long;
144} IEMSELDESC;
145/** Pointer to a selector descriptor table entry. */
146typedef IEMSELDESC *PIEMSELDESC;
147
148
149/*******************************************************************************
150* Defined Constants And Macros *
151*******************************************************************************/
152/** @name IEM status codes.
153 *
154 * Not quite sure how this will play out in the end, just aliasing safe status
155 * codes for now.
156 *
157 * @{ */
158#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
159/** @} */
160
161/** Temporary hack to disable the double execution. Will be removed in favor
162 * of a dedicated execution mode in EM. */
163//#define IEM_VERIFICATION_MODE_NO_REM
164
165/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
166 * due to GCC lacking knowledge about the value range of a switch. */
167#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
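/* Usage sketch added for illustration (not part of the original file): the
 * macro supplies the entire 'default:' arm of a switch that is known to cover
 * every possible value, silencing the GCC warning mentioned above. The
 * variables below are made up. */
#if 0
switch (enmEffOpSize)
{
    case IEMMODE_16BIT: cbValue = 2; break;
    case IEMMODE_32BIT: cbValue = 4; break;
    case IEMMODE_64BIT: cbValue = 8; break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
#endif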
168
169/**
170 * Call an opcode decoder function.
171 *
172 * We're using macros for this so that adding and removing parameters can be
173 * done as we please. See FNIEMOP_DEF.
174 */
175#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
176
177/**
178 * Call a common opcode decoder function taking one extra argument.
179 *
180 * We're using macros for this so that adding and removing parameters can be
181 * done as we please. See FNIEMOP_DEF_1.
182 */
183#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
184
185/**
186 * Call a common opcode decoder function taking two extra arguments.
187 *
188 * We're using macros for this so that adding and removing parameters can be
189 * done as we please. See FNIEMOP_DEF_2.
190 */
191#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
192
193/**
194 * Check if we're currently executing in real or virtual 8086 mode.
195 *
196 * @returns @c true if it is, @c false if not.
197 * @param a_pIemCpu The IEM state of the current CPU.
198 */
199#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
200
201/**
202 * Check if we're currently executing in long mode.
203 *
204 * @returns @c true if it is, @c false if not.
205 * @param a_pIemCpu The IEM state of the current CPU.
206 */
207#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
208
209/**
210 * Check if we're currently executing in real mode.
211 *
212 * @returns @c true if it is, @c false if not.
213 * @param a_pIemCpu The IEM state of the current CPU.
214 */
215#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
216
217/**
218 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
219 */
220#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
221
222/**
223 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
224 */
225#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
226
227/**
228 * Tests if at least one of the specified AMD CPUID features (extended) is
229 * marked present.
230 */
231#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
232
233/**
234 * Checks if an Intel CPUID feature is present.
235 */
236#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
237 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
238 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
239
240/**
241 * Check if the address is canonical.
242 */
243#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
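/* Worked example added for clarity: the macro recentres the address by adding
 * 2^47 and compares the sum against 2^48, which accepts exactly those
 * addresses whose bits 63:47 are all equal. Thus 0x00007FFFFFFFFFFF and
 * 0xFFFF800000000000 are canonical, while 0x0000800000000000 is not. */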
244
245
246/*******************************************************************************
247* Global Variables *
248*******************************************************************************/
249extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
250
251
252/** Function table for the ADD instruction. */
253static const IEMOPBINSIZES g_iemAImpl_add =
254{
255 iemAImpl_add_u8, iemAImpl_add_u8_locked,
256 iemAImpl_add_u16, iemAImpl_add_u16_locked,
257 iemAImpl_add_u32, iemAImpl_add_u32_locked,
258 iemAImpl_add_u64, iemAImpl_add_u64_locked
259};
260
261/** Function table for the ADC instruction. */
262static const IEMOPBINSIZES g_iemAImpl_adc =
263{
264 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
265 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
266 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
267 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
268};
269
270/** Function table for the SUB instruction. */
271static const IEMOPBINSIZES g_iemAImpl_sub =
272{
273 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
274 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
275 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
276 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
277};
278
279/** Function table for the SBB instruction. */
280static const IEMOPBINSIZES g_iemAImpl_sbb =
281{
282 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
283 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
284 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
285 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
286};
287
288/** Function table for the OR instruction. */
289static const IEMOPBINSIZES g_iemAImpl_or =
290{
291 iemAImpl_or_u8, iemAImpl_or_u8_locked,
292 iemAImpl_or_u16, iemAImpl_or_u16_locked,
293 iemAImpl_or_u32, iemAImpl_or_u32_locked,
294 iemAImpl_or_u64, iemAImpl_or_u64_locked
295};
296
297/** Function table for the XOR instruction. */
298static const IEMOPBINSIZES g_iemAImpl_xor =
299{
300 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
301 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
302 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
303 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
304};
305
306/** Function table for the AND instruction. */
307static const IEMOPBINSIZES g_iemAImpl_and =
308{
309 iemAImpl_and_u8, iemAImpl_and_u8_locked,
310 iemAImpl_and_u16, iemAImpl_and_u16_locked,
311 iemAImpl_and_u32, iemAImpl_and_u32_locked,
312 iemAImpl_and_u64, iemAImpl_and_u64_locked
313};
314
315/** Function table for the CMP instruction.
316 * @remarks Making operand order ASSUMPTIONS.
317 */
318static const IEMOPBINSIZES g_iemAImpl_cmp =
319{
320 iemAImpl_cmp_u8, NULL,
321 iemAImpl_cmp_u16, NULL,
322 iemAImpl_cmp_u32, NULL,
323 iemAImpl_cmp_u64, NULL
324};
325
326/** Function table for the TEST instruction.
327 * @remarks Making operand order ASSUMPTIONS.
328 */
329static const IEMOPBINSIZES g_iemAImpl_test =
330{
331 iemAImpl_test_u8, NULL,
332 iemAImpl_test_u16, NULL,
333 iemAImpl_test_u32, NULL,
334 iemAImpl_test_u64, NULL
335};
336
337/** Function table for the BT instruction. */
338static const IEMOPBINSIZES g_iemAImpl_bt =
339{
340 NULL, NULL,
341 iemAImpl_bt_u16, NULL,
342 iemAImpl_bt_u32, NULL,
343 iemAImpl_bt_u64, NULL
344};
345
346/** Function table for the BTC instruction. */
347static const IEMOPBINSIZES g_iemAImpl_btc =
348{
349 NULL, NULL,
350 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
351 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
352 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
353};
354
355/** Function table for the BTR instruction. */
356static const IEMOPBINSIZES g_iemAImpl_btr =
357{
358 NULL, NULL,
359 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
360 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
361 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
362};
363
364/** Function table for the BTS instruction. */
365static const IEMOPBINSIZES g_iemAImpl_bts =
366{
367 NULL, NULL,
368 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
369 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
370 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
371};
372
373/** Function table for the BSF instruction. */
374static const IEMOPBINSIZES g_iemAImpl_bsf =
375{
376 NULL, NULL,
377 iemAImpl_bsf_u16, NULL,
378 iemAImpl_bsf_u32, NULL,
379 iemAImpl_bsf_u64, NULL
380};
381
382/** Function table for the BSR instruction. */
383static const IEMOPBINSIZES g_iemAImpl_bsr =
384{
385 NULL, NULL,
386 iemAImpl_bsr_u16, NULL,
387 iemAImpl_bsr_u32, NULL,
388 iemAImpl_bsr_u64, NULL
389};
390
391/** Function table for the IMUL instruction. */
392static const IEMOPBINSIZES g_iemAImpl_imul_two =
393{
394 NULL, NULL,
395 iemAImpl_imul_two_u16, NULL,
396 iemAImpl_imul_two_u32, NULL,
397 iemAImpl_imul_two_u64, NULL
398};
399
400/** Group 1 /r lookup table. */
401static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
402{
403 &g_iemAImpl_add,
404 &g_iemAImpl_or,
405 &g_iemAImpl_adc,
406 &g_iemAImpl_sbb,
407 &g_iemAImpl_and,
408 &g_iemAImpl_sub,
409 &g_iemAImpl_xor,
410 &g_iemAImpl_cmp
411};
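/* Note added for clarity: the table above is indexed by the ModR/M reg field
 * of the group 1 opcodes (0x80..0x83), i.e. /0=ADD, /1=OR, /2=ADC, /3=SBB,
 * /4=AND, /5=SUB, /6=XOR, /7=CMP, which is why the entries are in this order
 * rather than grouped by arithmetic vs. logical operation. */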
412
413/** Function table for the INC instruction. */
414static const IEMOPUNARYSIZES g_iemAImpl_inc =
415{
416 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
417 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
418 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
419 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
420};
421
422/** Function table for the DEC instruction. */
423static const IEMOPUNARYSIZES g_iemAImpl_dec =
424{
425 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
426 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
427 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
428 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
429};
430
431/** Function table for the NEG instruction. */
432static const IEMOPUNARYSIZES g_iemAImpl_neg =
433{
434 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
435 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
436 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
437 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
438};
439
440/** Function table for the NOT instruction. */
441static const IEMOPUNARYSIZES g_iemAImpl_not =
442{
443 iemAImpl_not_u8, iemAImpl_not_u8_locked,
444 iemAImpl_not_u16, iemAImpl_not_u16_locked,
445 iemAImpl_not_u32, iemAImpl_not_u32_locked,
446 iemAImpl_not_u64, iemAImpl_not_u64_locked
447};
448
449
450/** Function table for the ROL instruction. */
451static const IEMOPSHIFTSIZES g_iemAImpl_rol =
452{
453 iemAImpl_rol_u8,
454 iemAImpl_rol_u16,
455 iemAImpl_rol_u32,
456 iemAImpl_rol_u64
457};
458
459/** Function table for the ROR instruction. */
460static const IEMOPSHIFTSIZES g_iemAImpl_ror =
461{
462 iemAImpl_ror_u8,
463 iemAImpl_ror_u16,
464 iemAImpl_ror_u32,
465 iemAImpl_ror_u64
466};
467
468/** Function table for the RCL instruction. */
469static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
470{
471 iemAImpl_rcl_u8,
472 iemAImpl_rcl_u16,
473 iemAImpl_rcl_u32,
474 iemAImpl_rcl_u64
475};
476
477/** Function table for the RCR instruction. */
478static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
479{
480 iemAImpl_rcr_u8,
481 iemAImpl_rcr_u16,
482 iemAImpl_rcr_u32,
483 iemAImpl_rcr_u64
484};
485
486/** Function table for the SHL instruction. */
487static const IEMOPSHIFTSIZES g_iemAImpl_shl =
488{
489 iemAImpl_shl_u8,
490 iemAImpl_shl_u16,
491 iemAImpl_shl_u32,
492 iemAImpl_shl_u64
493};
494
495/** Function table for the SHR instruction. */
496static const IEMOPSHIFTSIZES g_iemAImpl_shr =
497{
498 iemAImpl_shr_u8,
499 iemAImpl_shr_u16,
500 iemAImpl_shr_u32,
501 iemAImpl_shr_u64
502};
503
504/** Function table for the SAR instruction. */
505static const IEMOPSHIFTSIZES g_iemAImpl_sar =
506{
507 iemAImpl_sar_u8,
508 iemAImpl_sar_u16,
509 iemAImpl_sar_u32,
510 iemAImpl_sar_u64
511};
512
513
514/** Function table for the MUL instruction. */
515static const IEMOPMULDIVSIZES g_iemAImpl_mul =
516{
517 iemAImpl_mul_u8,
518 iemAImpl_mul_u16,
519 iemAImpl_mul_u32,
520 iemAImpl_mul_u64
521};
522
523/** Function table for the IMUL instruction working implicitly on rAX. */
524static const IEMOPMULDIVSIZES g_iemAImpl_imul =
525{
526 iemAImpl_imul_u8,
527 iemAImpl_imul_u16,
528 iemAImpl_imul_u32,
529 iemAImpl_imul_u64
530};
531
532/** Function table for the DIV instruction. */
533static const IEMOPMULDIVSIZES g_iemAImpl_div =
534{
535 iemAImpl_div_u8,
536 iemAImpl_div_u16,
537 iemAImpl_div_u32,
538 iemAImpl_div_u64
539};
540
541/** Function table for the IDIV instruction. */
542static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
543{
544 iemAImpl_idiv_u8,
545 iemAImpl_idiv_u16,
546 iemAImpl_idiv_u32,
547 iemAImpl_idiv_u64
548};
549
550/** Function table for the SHLD instruction. */
551static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
552{
553 iemAImpl_shld_u16,
554 iemAImpl_shld_u32,
555 iemAImpl_shld_u64,
556};
557
558/** Function table for the SHRD instruction. */
559static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
560{
561 iemAImpl_shrd_u16,
562 iemAImpl_shrd_u32,
563 iemAImpl_shrd_u64,
564};
565
566
567/*******************************************************************************
568* Internal Functions *
569*******************************************************************************/
570static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
571/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
572static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
573static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
574static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
575static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
576static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
577static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
578static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
579static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
580static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
581static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
582static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
583static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
584static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
585static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
586static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
587static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
588static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
589static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
590static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
591static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
592static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
593static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
594
595#ifdef IEM_VERIFICATION_MODE
596static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
597#endif
598static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
599static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
600
601
602/**
603 * Initializes the decoder state.
604 *
605 * @param pIemCpu The per CPU IEM state.
606 */
607DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu)
608{
609 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
610
611 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
612 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
613 ? IEMMODE_64BIT
614 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
615 ? IEMMODE_32BIT
616 : IEMMODE_16BIT;
617 pIemCpu->enmCpuMode = enmMode;
618 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
619 pIemCpu->enmEffAddrMode = enmMode;
620 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
621 pIemCpu->enmEffOpSize = enmMode;
622 pIemCpu->fPrefixes = 0;
623 pIemCpu->uRexReg = 0;
624 pIemCpu->uRexB = 0;
625 pIemCpu->uRexIndex = 0;
626 pIemCpu->iEffSeg = X86_SREG_DS;
627 pIemCpu->offOpcode = 0;
628 pIemCpu->cbOpcode = 0;
629 pIemCpu->cActiveMappings = 0;
630 pIemCpu->iNextMapping = 0;
631}
632
633
634/**
635 * Prefetches opcodes the first time execution starts.
636 *
637 * @returns Strict VBox status code.
638 * @param pIemCpu The IEM state.
639 */
640static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
641{
642#ifdef IEM_VERIFICATION_MODE
643 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
644#endif
645 iemInitDecoder(pIemCpu);
646
647 /*
648 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
649 *
650 * First translate CS:rIP to a physical address.
651 */
652 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
653 uint32_t cbToTryRead;
654 RTGCPTR GCPtrPC;
655 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
656 {
657 cbToTryRead = PAGE_SIZE;
658 GCPtrPC = pCtx->rip;
659 if (!IEM_IS_CANONICAL(GCPtrPC))
660 return iemRaiseGeneralProtectionFault0(pIemCpu);
661 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
662 }
663 else
664 {
665 uint32_t GCPtrPC32 = pCtx->eip;
666 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
667 if (GCPtrPC32 > pCtx->csHid.u32Limit)
668 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
669 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
670 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;
671 }
672
673 RTGCPHYS GCPhys;
674 uint64_t fFlags;
675 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
676 if (RT_FAILURE(rc))
677 {
678 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
679 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
680 }
681 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
682 {
683 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
684 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
685 }
686 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
687 {
688 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
689 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
690 }
691 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
692 /** @todo Check reserved bits and such stuff. PGM is better at doing
693 * that, so do it when implementing the guest virtual address
694 * TLB... */
695
696#ifdef IEM_VERIFICATION_MODE
697 /*
698 * Optimistic optimization: Use unconsumed opcode bytes from the previous
699 * instruction.
700 */
701 /** @todo optimize this differently by not using PGMPhysRead. */
702 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
703 pIemCpu->GCPhysOpcodes = GCPhys;
704 if ( offPrevOpcodes < cbOldOpcodes
705 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
706 {
707 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
708 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
709 pIemCpu->cbOpcode = cbNew;
710 return VINF_SUCCESS;
711 }
712#endif
713
714 /*
715 * Read the bytes at this address.
716 */
717 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
718 if (cbToTryRead > cbLeftOnPage)
719 cbToTryRead = cbLeftOnPage;
720 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
721 cbToTryRead = sizeof(pIemCpu->abOpcode);
722 /** @todo patch manager */
723 if (!pIemCpu->fByPassHandlers)
724 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
725 else
726 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
727 if (rc != VINF_SUCCESS)
728 {
729 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc\n", GCPtrPC, rc));
730 return rc;
731 }
732 pIemCpu->cbOpcode = cbToTryRead;
733
734 return VINF_SUCCESS;
735}
736
737
738/**
739 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
740 * exception if it fails.
741 *
742 * @returns Strict VBox status code.
743 * @param pIemCpu The IEM state.
744 * @param cbMin The minimum number of opcode bytes needed.
745 */
746static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
747{
748 /*
749 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
750 *
751 * First translate CS:rIP to a physical address.
752 */
753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
754 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
755 uint32_t cbToTryRead;
756 RTGCPTR GCPtrNext;
757 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
758 {
759 cbToTryRead = PAGE_SIZE;
760 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
761 if (!IEM_IS_CANONICAL(GCPtrNext))
762 return iemRaiseGeneralProtectionFault0(pIemCpu);
763 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
764 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
765 }
766 else
767 {
768 uint32_t GCPtrNext32 = pCtx->eip;
769 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
770 GCPtrNext32 += pIemCpu->cbOpcode;
771 if (GCPtrNext32 > pCtx->csHid.u32Limit)
772 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
773 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
774 if (cbToTryRead < cbMin - cbLeft)
775 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
776 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;
777 }
778
779 RTGCPHYS GCPhys;
780 uint64_t fFlags;
781 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
782 if (RT_FAILURE(rc))
783 {
784 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
785 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
786 }
787 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
788 {
789 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
790 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
791 }
792 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
793 {
794 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
795 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
796 }
797 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
798 //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
799 /** @todo Check reserved bits and such stuff. PGM is better at doing
800 * that, so do it when implementing the guest virtual address
801 * TLB... */
802
803 /*
804 * Read the bytes at this address.
805 */
806 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
807 if (cbToTryRead > cbLeftOnPage)
808 cbToTryRead = cbLeftOnPage;
809 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
810 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
811 Assert(cbToTryRead >= cbMin - cbLeft);
812 if (!pIemCpu->fByPassHandlers)
813 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
814 else
815 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
816 if (rc != VINF_SUCCESS)
817 {
818 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc\n", GCPtrNext, rc));
819 return rc;
820 }
821 pIemCpu->cbOpcode += cbToTryRead;
822 //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
823
824 return VINF_SUCCESS;
825}
826
827
828/**
829 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
830 *
831 * @returns Strict VBox status code.
832 * @param pIemCpu The IEM state.
833 * @param pb Where to return the opcode byte.
834 */
835DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
836{
837 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
838 if (rcStrict == VINF_SUCCESS)
839 {
840 uint8_t offOpcode = pIemCpu->offOpcode;
841 *pb = pIemCpu->abOpcode[offOpcode];
842 pIemCpu->offOpcode = offOpcode + 1;
843 }
844 else
845 *pb = 0;
846 return rcStrict;
847}
848
849
850/**
851 * Fetches the next opcode byte.
852 *
853 * @returns Strict VBox status code.
854 * @param pIemCpu The IEM state.
855 * @param pu8 Where to return the opcode byte.
856 */
857DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
858{
859 uint8_t const offOpcode = pIemCpu->offOpcode;
860 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
861 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
862
863 *pu8 = pIemCpu->abOpcode[offOpcode];
864 pIemCpu->offOpcode = offOpcode + 1;
865 return VINF_SUCCESS;
866}
867
868
869/**
870 * Fetches the next opcode byte, returns automatically on failure.
871 *
872 * @param a_pu8 Where to return the opcode byte.
873 * @remark Implicitly references pIemCpu.
874 */
875#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
876 do \
877 { \
878 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
879 if (rcStrict2 != VINF_SUCCESS) \
880 return rcStrict2; \
881 } while (0)
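/* Usage sketch added for illustration (not part of the original file): decoder
 * functions fetch ModR/M and immediate bytes through these macros so that any
 * fetch failure (page fault, segment limit, etc.) propagates out of the caller
 * immediately. The function below is made up. */
#if 0
FNIEMOP_DEF(iemOp_ExampleWithModRM)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns from this function on failure */
    /* ... decode bRm and emulate the instruction ... */
    return VINF_SUCCESS;
}
#endif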
882
883
884/**
885 * Fetches the next signed byte from the opcode stream.
886 *
887 * @returns Strict VBox status code.
888 * @param pIemCpu The IEM state.
889 * @param pi8 Where to return the signed byte.
890 */
891DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
892{
893 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
894}
895
896
897/**
898 * Fetches the next signed byte from the opcode stream, returning automatically
899 * on failure.
900 *
901 * @param pi8 Where to return the signed byte.
902 * @remark Implicitly references pIemCpu.
903 */
904#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
905 do \
906 { \
907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
908 if (rcStrict2 != VINF_SUCCESS) \
909 return rcStrict2; \
910 } while (0)
911
912
913/**
914 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
915 *
916 * @returns Strict VBox status code.
917 * @param pIemCpu The IEM state.
918 * @param pu16 Where to return the opcode word.
919 */
920DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
921{
922 uint8_t u8;
923 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
924 if (rcStrict == VINF_SUCCESS)
925 *pu16 = (int8_t)u8;
926 return rcStrict;
927}
928
929
930/**
931 * Fetches the next signed byte from the opcode stream, sign extending it to
932 * an unsigned 16-bit value.
933 *
934 * @returns Strict VBox status code.
935 * @param pIemCpu The IEM state.
936 * @param pu16 Where to return the unsigned word.
937 */
938DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
939{
940 uint8_t const offOpcode = pIemCpu->offOpcode;
941 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
942 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
943
944 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
945 pIemCpu->offOpcode = offOpcode + 1;
946 return VINF_SUCCESS;
947}
948
949
950/**
951 * Fetches the next signed byte from the opcode stream and sign-extends it to
952 * a word, returning automatically on failure.
953 *
954 * @param pu16 Where to return the word.
955 * @remark Implicitly references pIemCpu.
956 */
957#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
958 do \
959 { \
960 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
961 if (rcStrict2 != VINF_SUCCESS) \
962 return rcStrict2; \
963 } while (0)
964
965
966/**
967 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
968 *
969 * @returns Strict VBox status code.
970 * @param pIemCpu The IEM state.
971 * @param pu16 Where to return the opcode word.
972 */
973DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
974{
975 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
976 if (rcStrict == VINF_SUCCESS)
977 {
978 uint8_t offOpcode = pIemCpu->offOpcode;
979 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
980 pIemCpu->offOpcode = offOpcode + 2;
981 }
982 else
983 *pu16 = 0;
984 return rcStrict;
985}
986
987
988/**
989 * Fetches the next opcode word.
990 *
991 * @returns Strict VBox status code.
992 * @param pIemCpu The IEM state.
993 * @param pu16 Where to return the opcode word.
994 */
995DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
996{
997 uint8_t const offOpcode = pIemCpu->offOpcode;
998 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
999 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1000
1001 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1002 pIemCpu->offOpcode = offOpcode + 2;
1003 return VINF_SUCCESS;
1004}
1005
1006
1007/**
1008 * Fetches the next opcode word, returns automatically on failure.
1009 *
1010 * @param a_pu16 Where to return the opcode word.
1011 * @remark Implicitly references pIemCpu.
1012 */
1013#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1014 do \
1015 { \
1016 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1017 if (rcStrict2 != VINF_SUCCESS) \
1018 return rcStrict2; \
1019 } while (0)
1020
1021
1022/**
1023 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1024 *
1025 * @returns Strict VBox status code.
1026 * @param pIemCpu The IEM state.
1027 * @param pu32 Where to return the opcode double word.
1028 */
1029DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1030{
1031 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1032 if (rcStrict == VINF_SUCCESS)
1033 {
1034 uint8_t offOpcode = pIemCpu->offOpcode;
1035 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1036 pIemCpu->offOpcode = offOpcode + 2;
1037 }
1038 else
1039 *pu32 = 0;
1040 return rcStrict;
1041}
1042
1043
1044/**
1045 * Fetches the next opcode word, zero extending it to a double word.
1046 *
1047 * @returns Strict VBox status code.
1048 * @param pIemCpu The IEM state.
1049 * @param pu32 Where to return the opcode double word.
1050 */
1051DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1052{
1053 uint8_t const offOpcode = pIemCpu->offOpcode;
1054 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1055 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1056
1057 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1058 pIemCpu->offOpcode = offOpcode + 2;
1059 return VINF_SUCCESS;
1060}
1061
1062
1063/**
1064 * Fetches the next opcode word and zero extends it to a double word, returns
1065 * automatically on failure.
1066 *
1067 * @param a_pu32 Where to return the opcode double word.
1068 * @remark Implicitly references pIemCpu.
1069 */
1070#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1071 do \
1072 { \
1073 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1074 if (rcStrict2 != VINF_SUCCESS) \
1075 return rcStrict2; \
1076 } while (0)
1077
1078
1079/**
1080 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1081 *
1082 * @returns Strict VBox status code.
1083 * @param pIemCpu The IEM state.
1084 * @param pu64 Where to return the opcode quad word.
1085 */
1086DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1087{
1088 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1089 if (rcStrict == VINF_SUCCESS)
1090 {
1091 uint8_t offOpcode = pIemCpu->offOpcode;
1092 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1093 pIemCpu->offOpcode = offOpcode + 2;
1094 }
1095 else
1096 *pu64 = 0;
1097 return rcStrict;
1098}
1099
1100
1101/**
1102 * Fetches the next opcode word, zero extending it to a quad word.
1103 *
1104 * @returns Strict VBox status code.
1105 * @param pIemCpu The IEM state.
1106 * @param pu64 Where to return the opcode quad word.
1107 */
1108DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1109{
1110 uint8_t const offOpcode = pIemCpu->offOpcode;
1111 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1112 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1113
1114 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1115 pIemCpu->offOpcode = offOpcode + 2;
1116 return VINF_SUCCESS;
1117}
1118
1119
1120/**
1121 * Fetches the next opcode word and zero extends it to a quad word, returns
1122 * automatically on failure.
1123 *
1124 * @param a_pu64 Where to return the opcode quad word.
1125 * @remark Implicitly references pIemCpu.
1126 */
1127#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1128 do \
1129 { \
1130 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1131 if (rcStrict2 != VINF_SUCCESS) \
1132 return rcStrict2; \
1133 } while (0)
1134
1135
1136/**
1137 * Fetches the next signed word from the opcode stream.
1138 *
1139 * @returns Strict VBox status code.
1140 * @param pIemCpu The IEM state.
1141 * @param pi16 Where to return the signed word.
1142 */
1143DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1144{
1145 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1146}
1147
1148
1149/**
1150 * Fetches the next signed word from the opcode stream, returning automatically
1151 * on failure.
1152 *
1153 * @param pi16 Where to return the signed word.
1154 * @remark Implicitly references pIemCpu.
1155 */
1156#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1157 do \
1158 { \
1159 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1160 if (rcStrict2 != VINF_SUCCESS) \
1161 return rcStrict2; \
1162 } while (0)
1163
1164
1165/**
1166 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1167 *
1168 * @returns Strict VBox status code.
1169 * @param pIemCpu The IEM state.
1170 * @param pu32 Where to return the opcode dword.
1171 */
1172DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1173{
1174 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1175 if (rcStrict == VINF_SUCCESS)
1176 {
1177 uint8_t offOpcode = pIemCpu->offOpcode;
1178 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1179 pIemCpu->abOpcode[offOpcode + 1],
1180 pIemCpu->abOpcode[offOpcode + 2],
1181 pIemCpu->abOpcode[offOpcode + 3]);
1182 pIemCpu->offOpcode = offOpcode + 4;
1183 }
1184 else
1185 *pu32 = 0;
1186 return rcStrict;
1187}
1188
1189
1190/**
1191 * Fetches the next opcode dword.
1192 *
1193 * @returns Strict VBox status code.
1194 * @param pIemCpu The IEM state.
1195 * @param pu32 Where to return the opcode double word.
1196 */
1197DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1198{
1199 uint8_t const offOpcode = pIemCpu->offOpcode;
1200 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1201 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1202
1203 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1204 pIemCpu->abOpcode[offOpcode + 1],
1205 pIemCpu->abOpcode[offOpcode + 2],
1206 pIemCpu->abOpcode[offOpcode + 3]);
1207 pIemCpu->offOpcode = offOpcode + 4;
1208 return VINF_SUCCESS;
1209}
1210
1211
1212/**
1213 * Fetches the next opcode dword, returns automatically on failure.
1214 *
1215 * @param a_pu32 Where to return the opcode dword.
1216 * @remark Implicitly references pIemCpu.
1217 */
1218#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1219 do \
1220 { \
1221 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1222 if (rcStrict2 != VINF_SUCCESS) \
1223 return rcStrict2; \
1224 } while (0)
1225
1226
1227/**
1228 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1229 *
1230 * @returns Strict VBox status code.
1231 * @param pIemCpu The IEM state.
1232 * @param pu64 Where to return the zero-extended opcode dword.
1233 */
1234DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1235{
1236 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1237 if (rcStrict == VINF_SUCCESS)
1238 {
1239 uint8_t offOpcode = pIemCpu->offOpcode;
1240 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1241 pIemCpu->abOpcode[offOpcode + 1],
1242 pIemCpu->abOpcode[offOpcode + 2],
1243 pIemCpu->abOpcode[offOpcode + 3]);
1244 pIemCpu->offOpcode = offOpcode + 4;
1245 }
1246 else
1247 *pu64 = 0;
1248 return rcStrict;
1249}
1250
1251
1252/**
1253 * Fetches the next opcode dword, zero extending it to a quad word.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pu64 Where to return the opcode quad word.
1258 */
1259DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1260{
1261 uint8_t const offOpcode = pIemCpu->offOpcode;
1262 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1263 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1264
1265 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1266 pIemCpu->abOpcode[offOpcode + 1],
1267 pIemCpu->abOpcode[offOpcode + 2],
1268 pIemCpu->abOpcode[offOpcode + 3]);
1269 pIemCpu->offOpcode = offOpcode + 4;
1270 return VINF_SUCCESS;
1271}
1272
1273
1274/**
1275 * Fetches the next opcode dword and zero extends it to a quad word, returns
1276 * automatically on failure.
1277 *
1278 * @param a_pu64 Where to return the opcode quad word.
1279 * @remark Implicitly references pIemCpu.
1280 */
1281#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1282 do \
1283 { \
1284 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1285 if (rcStrict2 != VINF_SUCCESS) \
1286 return rcStrict2; \
1287 } while (0)
1288
1289
1290/**
1291 * Fetches the next signed double word from the opcode stream.
1292 *
1293 * @returns Strict VBox status code.
1294 * @param pIemCpu The IEM state.
1295 * @param pi32 Where to return the signed double word.
1296 */
1297DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1298{
1299 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1300}
1301
1302/**
1303 * Fetches the next signed double word from the opcode stream, returning
1304 * automatically on failure.
1305 *
1306 * @param pi32 Where to return the signed double word.
1307 * @remark Implicitly references pIemCpu.
1308 */
1309#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1310 do \
1311 { \
1312 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1313 if (rcStrict2 != VINF_SUCCESS) \
1314 return rcStrict2; \
1315 } while (0)
1316
1317
1318/**
1319 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1320 *
1321 * @returns Strict VBox status code.
1322 * @param pIemCpu The IEM state.
1323 * @param pu64 Where to return the opcode qword.
1324 */
1325DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1326{
1327 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1328 if (rcStrict == VINF_SUCCESS)
1329 {
1330 uint8_t offOpcode = pIemCpu->offOpcode;
1331 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1332 pIemCpu->abOpcode[offOpcode + 1],
1333 pIemCpu->abOpcode[offOpcode + 2],
1334 pIemCpu->abOpcode[offOpcode + 3]);
1335 pIemCpu->offOpcode = offOpcode + 4;
1336 }
1337 else
1338 *pu64 = 0;
1339 return rcStrict;
1340}
1341
1342
1343/**
1344 * Fetches the next opcode dword, sign extending it into a quad word.
1345 *
1346 * @returns Strict VBox status code.
1347 * @param pIemCpu The IEM state.
1348 * @param pu64 Where to return the opcode quad word.
1349 */
1350DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1351{
1352 uint8_t const offOpcode = pIemCpu->offOpcode;
1353 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1354 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1355
1356 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1357 pIemCpu->abOpcode[offOpcode + 1],
1358 pIemCpu->abOpcode[offOpcode + 2],
1359 pIemCpu->abOpcode[offOpcode + 3]);
1360 *pu64 = i32;
1361 pIemCpu->offOpcode = offOpcode + 4;
1362 return VINF_SUCCESS;
1363}
1364
1365
1366/**
1367 * Fetches the next opcode double word and sign extends it to a quad word,
1368 * returns automatically on failure.
1369 *
1370 * @param a_pu64 Where to return the opcode quad word.
1371 * @remark Implicitly references pIemCpu.
1372 */
1373#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1374 do \
1375 { \
1376 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1377 if (rcStrict2 != VINF_SUCCESS) \
1378 return rcStrict2; \
1379 } while (0)
1380
1381
1382/**
1383 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1384 *
1385 * @returns Strict VBox status code.
1386 * @param pIemCpu The IEM state.
1387 * @param pu64 Where to return the opcode qword.
1388 */
1389DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1390{
1391 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1392 if (rcStrict == VINF_SUCCESS)
1393 {
1394 uint8_t offOpcode = pIemCpu->offOpcode;
1395 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1396 pIemCpu->abOpcode[offOpcode + 1],
1397 pIemCpu->abOpcode[offOpcode + 2],
1398 pIemCpu->abOpcode[offOpcode + 3],
1399 pIemCpu->abOpcode[offOpcode + 4],
1400 pIemCpu->abOpcode[offOpcode + 5],
1401 pIemCpu->abOpcode[offOpcode + 6],
1402 pIemCpu->abOpcode[offOpcode + 7]);
1403 pIemCpu->offOpcode = offOpcode + 8;
1404 }
1405 else
1406 *pu64 = 0;
1407 return rcStrict;
1408}
1409
1410
1411/**
1412 * Fetches the next opcode qword.
1413 *
1414 * @returns Strict VBox status code.
1415 * @param pIemCpu The IEM state.
1416 * @param pu64 Where to return the opcode qword.
1417 */
1418DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1419{
1420 uint8_t const offOpcode = pIemCpu->offOpcode;
1421 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1422 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1423
1424 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1425 pIemCpu->abOpcode[offOpcode + 1],
1426 pIemCpu->abOpcode[offOpcode + 2],
1427 pIemCpu->abOpcode[offOpcode + 3],
1428 pIemCpu->abOpcode[offOpcode + 4],
1429 pIemCpu->abOpcode[offOpcode + 5],
1430 pIemCpu->abOpcode[offOpcode + 6],
1431 pIemCpu->abOpcode[offOpcode + 7]);
1432 pIemCpu->offOpcode = offOpcode + 8;
1433 return VINF_SUCCESS;
1434}
1435
1436
1437/**
1438 * Fetches the next opcode quad word, returns automatically on failure.
1439 *
1440 * @param a_pu64 Where to return the opcode quad word.
1441 * @remark Implicitly references pIemCpu.
1442 */
1443#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1444 do \
1445 { \
1446 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1447 if (rcStrict2 != VINF_SUCCESS) \
1448 return rcStrict2; \
1449 } while (0)
1450
1451
1452/** @name Misc Worker Functions.
1453 * @{
1454 */
1455
1456
1457/**
1458 * Validates a new SS segment.
1459 *
1460 * @returns VBox strict status code.
1461 * @param pIemCpu The IEM per CPU instance data.
1462 * @param pCtx The CPU context.
1463 * @param NewSS The new SS selector.
1464 * @param uCpl The CPL to load the stack for.
1465 * @param pDesc Where to return the descriptor.
1466 */
1467static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1468{
1469 NOREF(pCtx);
1470
1471 /* Null selectors are not allowed (we're not called for dispatching
1472 interrupts with SS=0 in long mode). */
1473 if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
1474 {
1475 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1476 return iemRaiseGeneralProtectionFault0(pIemCpu);
1477 }
1478
1479 /*
1480 * Read the descriptor.
1481 */
1482 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1483 if (rcStrict != VINF_SUCCESS)
1484 return rcStrict;
1485
1486 /*
1487 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1488 */
1489 if (!pDesc->Legacy.Gen.u1DescType)
1490 {
1491 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1492 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1493 }
1494
1495 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1496 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1497 {
1498 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1499 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1500 }
1507 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1508 if ((NewSS & X86_SEL_RPL) != uCpl)
1509 {
1510 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1511 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1512 }
1513 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1514 {
1515 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1516 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1517 }
1518
1519 /* Is it there? */
1520 /** @todo testcase: Is this checked before the canonical / limit check below? */
1521 if (!pDesc->Legacy.Gen.u1Present)
1522 {
1523 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1524 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1525 }
1526
1527 return VINF_SUCCESS;
1528}
1529
1530
1531/** @} */
1532
1533/** @name Raising Exceptions.
1534 *
1535 * @{
1536 */
1537
1538/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1539 * @{ */
1540/** CPU exception. */
1541#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1542/** External interrupt (from PIC, APIC, whatever). */
1543#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1544/** Software interrupt (int, into or bound). */
1545#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1546/** Takes an error code. */
1547#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1548/** Takes a CR2. */
1549#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1550/** Generated by the breakpoint instruction. */
1551#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1552/** @} */
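/* Examples added for clarity: a page fault would be raised with
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, since
 * #PF pushes an error code and reports the faulting address in CR2, whereas
 * an INT n instruction uses IEM_XCPT_FLAGS_T_SOFT_INT with neither. */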
1553
1554/**
1555 * Loads the specified stack far pointer from the TSS.
1556 *
1557 * @returns VBox strict status code.
1558 * @param pIemCpu The IEM per CPU instance data.
1559 * @param pCtx The CPU context.
1560 * @param uCpl The CPL to load the stack for.
1561 * @param pSelSS Where to return the new stack segment.
1562 * @param puEsp Where to return the new stack pointer.
1563 */
1564static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1565 PRTSEL pSelSS, uint32_t *puEsp)
1566{
1567 VBOXSTRICTRC rcStrict;
1568 Assert(uCpl < 4);
1569 *puEsp = 0; /* make gcc happy */
1570 *pSelSS = 0; /* make gcc happy */
1571
1572 switch (pCtx->trHid.Attr.n.u4Type)
1573 {
1574 /*
1575 * 16-bit TSS (X86TSS16).
1576 */
1577 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1578 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1579 {
1580 uint32_t off = uCpl * 4 + 2;
1581 if (off + 4 > pCtx->trHid.u32Limit)
1582 {
1583 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
1584 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1585 }
1586
1587 uint32_t u32Tmp = 0; /* gcc maybe... */
1588 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1589 if (rcStrict == VINF_SUCCESS)
1590 {
1591 *puEsp = RT_LOWORD(u32Tmp);
1592 *pSelSS = RT_HIWORD(u32Tmp);
1593 return VINF_SUCCESS;
1594 }
1595 break;
1596 }
1597
1598 /*
1599 * 32-bit TSS (X86TSS32).
1600 */
1601 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1602 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1603 {
1604 uint32_t off = uCpl * 8 + 4;
1605 if (off + 7 > pCtx->trHid.u32Limit)
1606 {
1607 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->trHid.u32Limit));
1608 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1609 }
1610
1611 uint64_t u64Tmp;
1612 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
1613 if (rcStrict == VINF_SUCCESS)
1614 {
1615 *puEsp = u64Tmp & UINT32_MAX;
1616 *pSelSS = (RTSEL)(u64Tmp >> 32);
1617 return VINF_SUCCESS;
1618 }
1619 break;
1620 }
1621
1622 default:
1623 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1624 }
1625 return rcStrict;
1626}
1627
1628
1629/**
1630 * Adjust the CPU state according to the exception being raised.
1631 *
1632 * @param pCtx The CPU context.
1633 * @param u8Vector The exception that has been raised.
1634 */
1635DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1636{
1637 switch (u8Vector)
1638 {
1639 case X86_XCPT_DB:
1640 pCtx->dr[7] &= ~X86_DR7_GD;
1641 break;
1642 /** @todo Read the AMD and Intel exception reference... */
1643 }
1644}
1645
1646
1647/**
1648 * Implements exceptions and interrupts for real mode.
1649 *
1650 * @returns VBox strict status code.
1651 * @param pIemCpu The IEM per CPU instance data.
1652 * @param pCtx The CPU context.
1653 * @param cbInstr The number of bytes to offset rIP by in the return
1654 * address.
1655 * @param u8Vector The interrupt / exception vector number.
1656 * @param fFlags The flags.
1657 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1658 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1659 */
1660static VBOXSTRICTRC
1661iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1662 PCPUMCTX pCtx,
1663 uint8_t cbInstr,
1664 uint8_t u8Vector,
1665 uint32_t fFlags,
1666 uint16_t uErr,
1667 uint64_t uCr2)
1668{
1669 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1670 NOREF(uErr); NOREF(uCr2);
1671
1672 /*
1673 * Read the IDT entry.
1674 */
1675 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1676 {
1677 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1678 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1679 }
1680 RTFAR16 Idte;
1681 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1682 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1683 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1684 return rcStrict;
1685
1686 /*
1687 * Push the stack frame.
1688 */
1689 uint16_t *pu16Frame;
1690 uint64_t uNewRsp;
1691 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1692 if (rcStrict != VINF_SUCCESS)
1693 return rcStrict;
1694
1695 pu16Frame[2] = (uint16_t)pCtx->eflags.u;
1696 pu16Frame[1] = (uint16_t)pCtx->cs;
1697 pu16Frame[0] = pCtx->ip + cbInstr;
1698 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
1699 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1700 return rcStrict;
1701
1702 /*
1703 * Load the vector address into cs:ip and make exception specific state
1704 * adjustments.
1705 */
1706 pCtx->cs = Idte.sel;
1707 pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
1708 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1709 pCtx->rip = Idte.off;
1710 pCtx->eflags.Bits.u1IF = 0;
1711
1712 /** @todo do we actually do this in real mode? */
1713 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1714 iemRaiseXcptAdjustState(pCtx, u8Vector);
1715
1716 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1717}
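
/* Worked example of the real mode path above, with hypothetical numbers: an
   INT 21h with IDTR.base=0 fetches the 4-byte far pointer at 0x21 * 4 = 0x84,
   pushes the 6-byte FLAGS/CS/return-IP frame, and if that vector holds
   1234h:5678h the new state is CS=1234h (base 0x12340), IP=5678h with IF
   cleared.  Illustration only, not normative. */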
1718
1719
1720/**
1721 * Implements exceptions and interrupts for protected mode.
1722 *
1723 * @returns VBox strict status code.
1724 * @param pIemCpu The IEM per CPU instance data.
1725 * @param pCtx The CPU context.
1726 * @param cbInstr The number of bytes to offset rIP by in the return
1727 * address.
1728 * @param u8Vector The interrupt / exception vector number.
1729 * @param fFlags The flags.
1730 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1731 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1732 */
1733static VBOXSTRICTRC
1734iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
1735 PCPUMCTX pCtx,
1736 uint8_t cbInstr,
1737 uint8_t u8Vector,
1738 uint32_t fFlags,
1739 uint16_t uErr,
1740 uint64_t uCr2)
1741{
1742 NOREF(cbInstr);
1743
1744 /*
1745 * Read the IDT entry.
1746 */
1747 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1748 {
1749 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1750 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1751 }
1752 X86DESC Idte;
1753 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
1754 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
1755 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1756 return rcStrict;
1757 Log4(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
1758 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1759 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
1760
1761 /*
1762 * Check the descriptor type, DPL and such.
1763 * ASSUMES this is done in the same order as described for call-gate calls.
1764 */
1765 if (Idte.Gate.u1DescType)
1766 {
1767 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1768 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1769 }
1770 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1771 switch (Idte.Gate.u4Type)
1772 {
1773 case X86_SEL_TYPE_SYS_UNDEFINED:
1774 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1775 case X86_SEL_TYPE_SYS_LDT:
1776 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1777 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1778 case X86_SEL_TYPE_SYS_UNDEFINED2:
1779 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1780 case X86_SEL_TYPE_SYS_UNDEFINED3:
1781 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1782 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1783 case X86_SEL_TYPE_SYS_UNDEFINED4:
1784 {
1785 /** @todo check what actually happens when the type is wrong...
1786 * esp. call gates. */
1787 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1788 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1789 }
1790
1791 case X86_SEL_TYPE_SYS_286_INT_GATE:
1792 case X86_SEL_TYPE_SYS_386_INT_GATE:
1793 fEflToClear |= X86_EFL_IF;
1794 break;
1795
1796 case X86_SEL_TYPE_SYS_TASK_GATE:
1797 /** @todo task gates. */
1798 AssertFailedReturn(VERR_NOT_SUPPORTED);
1799
1800 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1801 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1802 break;
1803
1804 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1805 }
1806
1807 /* Check DPL against CPL if applicable. */
1808 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1809 {
1810 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
1811 {
1812 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
1813 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1814 }
1815 }
1816
1817 /* Is it there? */
1818 if (!Idte.Gate.u1Present)
1819 {
1820 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1821 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1822 }
1823
1824 /* A null CS is bad. */
1825 RTSEL NewCS = Idte.Gate.u16Sel;
1826 if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
1827 {
1828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1829 return iemRaiseGeneralProtectionFault0(pIemCpu);
1830 }
1831
1832 /* Fetch the descriptor for the new CS. */
1833 IEMSELDESC DescCS;
1834 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
1835 if (rcStrict != VINF_SUCCESS)
1836 {
1837 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1838 return rcStrict;
1839 }
1840
1841 /* Must be a code segment. */
1842 if (!DescCS.Legacy.Gen.u1DescType)
1843 {
1844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1845 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1846 }
1847 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1848 {
1849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1850 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1851 }
1852
1853 /* Don't allow lowering the privilege level. */
1854 /** @todo Does the lowering of privileges apply to software interrupts
1855     *        only?  This has a bearing on the more-privileged or
1856 * same-privilege stack behavior further down. A testcase would
1857 * be nice. */
1858 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1859 {
1860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1861 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1862 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1863 }
1864 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
1865
1866 /* Check the new EIP against the new CS limit. */
1867 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1868 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1869 ? Idte.Gate.u16OffsetLow
1870 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1871 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
1872 if (DescCS.Legacy.Gen.u1Granularity)
1873 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1874 if (uNewEip > cbLimitCS)
1875 {
1876        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - EIP=%#x is out of bounds (%#x) -> #GP\n",
1877             u8Vector, NewCS, uNewEip, cbLimitCS));
1878 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
1879 }
1880
1881 /* Make sure the selector is present. */
1882 if (!DescCS.Legacy.Gen.u1Present)
1883 {
1884 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1885 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
1886 }
1887
1888 /*
1889 * If the privilege level changes, we need to get a new stack from the TSS.
1890 * This in turns means validating the new SS and ESP...
1891 */
1892 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1893 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
1894 if (uNewCpl != pIemCpu->uCpl)
1895 {
1896 RTSEL NewSS;
1897 uint32_t uNewEsp;
1898 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 IEMSELDESC DescSS;
1903 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
1904 if (rcStrict != VINF_SUCCESS)
1905 return rcStrict;
1906
1907 /* Check that there is sufficient space for the stack frame. */
1908 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
1909 if (DescSS.Legacy.Gen.u1Granularity)
1910 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1911 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1912
1913 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
1914 if ( uNewEsp - 1 > cbLimitSS
1915 || uNewEsp < cbStackFrame)
1916 {
1917 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1918 u8Vector, NewSS, uNewEsp, cbStackFrame));
1919 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
1920 }
1921
1922 /*
1923 * Start making changes.
1924 */
1925
1926 /* Create the stack frame. */
1927 RTPTRUNION uStackFrame;
1928 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
1929 uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
1930 if (rcStrict != VINF_SUCCESS)
1931 return rcStrict;
1932 void * const pvStackFrame = uStackFrame.pv;
1933
1934 if (fFlags & IEM_XCPT_FLAGS_ERR)
1935 *uStackFrame.pu32++ = uErr;
1936 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1937 ? pCtx->eip + cbInstr : pCtx->eip;
1938 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1939 uStackFrame.pu32[2] = pCtx->eflags.u;
1940 uStackFrame.pu32[3] = pCtx->esp;
1941 uStackFrame.pu32[4] = pCtx->ss;
1942 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
1943 if (rcStrict != VINF_SUCCESS)
1944 return rcStrict;
1945
1946 /* Mark the selectors 'accessed' (hope this is the correct time). */
1947        /** @todo testcase: exactly _when_ are the accessed bits set - before or
1948 * after pushing the stack frame? (Write protect the gdt + stack to
1949 * find out.) */
1950 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1951 {
1952 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
1953 if (rcStrict != VINF_SUCCESS)
1954 return rcStrict;
1955 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1956 }
1957
1958 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1959 {
1960 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1964 }
1965
1966 /*
1967         * Start committing the register changes (joins with the DPL=CPL branch).
1968 */
1969 pCtx->ss = NewSS;
1970 pCtx->ssHid.u32Limit = cbLimitSS;
1971 pCtx->ssHid.u64Base = X86DESC_BASE(DescSS.Legacy);
1972 pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
1973 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
1974 pIemCpu->uCpl = uNewCpl;
1975 }
1976 /*
1977 * Same privilege, no stack change and smaller stack frame.
1978 */
1979 else
1980 {
1981 uint64_t uNewRsp;
1982 RTPTRUNION uStackFrame;
1983 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
1984 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
1985 if (rcStrict != VINF_SUCCESS)
1986 return rcStrict;
1987 void * const pvStackFrame = uStackFrame.pv;
1988
1989 if (fFlags & IEM_XCPT_FLAGS_ERR)
1990 *uStackFrame.pu32++ = uErr;
1991 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1992 ? pCtx->eip + cbInstr : pCtx->eip;
1993 uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
1994 uStackFrame.pu32[2] = pCtx->eflags.u;
1995 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
1996 if (rcStrict != VINF_SUCCESS)
1997 return rcStrict;
1998
1999 /* Mark the CS selector as 'accessed'. */
2000 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2001 {
2002 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2003 if (rcStrict != VINF_SUCCESS)
2004 return rcStrict;
2005 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2006 }
2007
2008 /*
2009 * Start committing the register changes (joins with the other branch).
2010 */
2011 pCtx->rsp = uNewRsp;
2012 }
2013
2014 /* ... register committing continues. */
2015 pCtx->cs = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2016 pCtx->csHid.u32Limit = cbLimitCS;
2017 pCtx->csHid.u64Base = X86DESC_BASE(DescCS.Legacy);
2018 pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
2019
2020 pCtx->rip = uNewEip;
2021 pCtx->rflags.u &= ~fEflToClear;
2022
2023 if (fFlags & IEM_XCPT_FLAGS_CR2)
2024 pCtx->cr2 = uCr2;
2025
2026 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2027 iemRaiseXcptAdjustState(pCtx, u8Vector);
2028
2029 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2030}
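
/* For reference, the 32-bit frames built above look like this from the lowest
   address up (a sketch; the error code is only present with IEM_XCPT_FLAGS_ERR):
   with a privilege change   [uErr] EIP CS EFLAGS ESP SS   (20/24 bytes),
   without a privilege change   [uErr] EIP CS EFLAGS   (12/16 bytes); the error
   code sits lowest because it is written first at the new stack top. */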
2031
2032
2033/**
2034 * Implements exceptions and interrupts for V8086 mode.
2035 *
2036 * @returns VBox strict status code.
2037 * @param pIemCpu The IEM per CPU instance data.
2038 * @param pCtx The CPU context.
2039 * @param cbInstr The number of bytes to offset rIP by in the return
2040 * address.
2041 * @param u8Vector The interrupt / exception vector number.
2042 * @param fFlags The flags.
2043 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2044 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2045 */
2046static VBOXSTRICTRC
2047iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2048 PCPUMCTX pCtx,
2049 uint8_t cbInstr,
2050 uint8_t u8Vector,
2051 uint32_t fFlags,
2052 uint16_t uErr,
2053 uint64_t uCr2)
2054{
2055 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2056 AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
2057 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2058}
2059
2060
2061/**
2062 * Implements exceptions and interrupts for long mode.
2063 *
2064 * @returns VBox strict status code.
2065 * @param pIemCpu The IEM per CPU instance data.
2066 * @param pCtx The CPU context.
2067 * @param cbInstr The number of bytes to offset rIP by in the return
2068 * address.
2069 * @param u8Vector The interrupt / exception vector number.
2070 * @param fFlags The flags.
2071 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2072 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2073 */
2074static VBOXSTRICTRC
2075iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2076 PCPUMCTX pCtx,
2077 uint8_t cbInstr,
2078 uint8_t u8Vector,
2079 uint32_t fFlags,
2080 uint16_t uErr,
2081 uint64_t uCr2)
2082{
2083 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2084 AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
2085 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2086}
2087
2088
2089/**
2090 * Implements exceptions and interrupts.
2091 *
2092 * All exceptions and interrupts go through this function!
2093 *
2094 * @returns VBox strict status code.
2095 * @param pIemCpu The IEM per CPU instance data.
2096 * @param cbInstr The number of bytes to offset rIP by in the return
2097 * address.
2098 * @param u8Vector The interrupt / exception vector number.
2099 * @param fFlags The flags.
2100 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2101 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2102 */
2103DECL_NO_INLINE(static, VBOXSTRICTRC)
2104iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2105 uint8_t cbInstr,
2106 uint8_t u8Vector,
2107 uint32_t fFlags,
2108 uint16_t uErr,
2109 uint64_t uCr2)
2110{
2111 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2112
2113 /*
2114 * Do recursion accounting.
2115 */
2116 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2117 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2118 if (pIemCpu->cXcptRecursions == 0)
2119 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2120 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2121 else
2122 {
2123 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2124 u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2125
2126        /** @todo double and triple faults. */
2127 AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
2128
2129 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2130 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2131 {
2132 ....
2133 } */
2134 }
2135 pIemCpu->cXcptRecursions++;
2136 pIemCpu->uCurXcpt = u8Vector;
2137 pIemCpu->fCurXcpt = fFlags;
2138
2139 /*
2140 * Extensive logging.
2141 */
2142#ifdef LOG_ENABLED
2143 if (LogIs3Enabled())
2144 {
2145 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2146 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2147 char szRegs[4096];
2148 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2149 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2150 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2151 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2152 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2153 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2154 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2155 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2156 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2157 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2158 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2159 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2160 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2161 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2162 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2163 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2164 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2165 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2166 " efer=%016VR{efer}\n"
2167 " pat=%016VR{pat}\n"
2168 " sf_mask=%016VR{sf_mask}\n"
2169 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2170 " lstar=%016VR{lstar}\n"
2171 " star=%016VR{star} cstar=%016VR{cstar}\n"
2172 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2173 );
2174
2175 char szInstr[256];
2176 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2177 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2178 szInstr, sizeof(szInstr), NULL);
2179 Log3(("%s%s\n", szRegs, szInstr));
2180 }
2181#endif /* LOG_ENABLED */
2182
2183 /*
2184 * Call the mode specific worker function.
2185 */
2186 VBOXSTRICTRC rcStrict;
2187 if (!(pCtx->cr0 & X86_CR0_PE))
2188 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2189 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2190 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2191 else if (!pCtx->eflags.Bits.u1VM)
2192 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2193 else
2194 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2195
2196 /*
2197 * Unwind.
2198 */
2199 pIemCpu->cXcptRecursions--;
2200 pIemCpu->uCurXcpt = uPrevXcpt;
2201 pIemCpu->fCurXcpt = fPrevXcpt;
2202 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
2203 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
2204 return rcStrict;
2205}
2206
2207
2208/** \#DE - 00. */
2209DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2210{
2211 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2212}
2213
2214
2215/** \#DB - 01. */
2216DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2217{
2218 /** @todo set/clear RF. */
2219 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2220}
2221
2222
2223/** \#UD - 06. */
2224DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2225{
2226 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2227}
2228
2229
2230/** \#NM - 07. */
2231DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2232{
2233 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2234}
2235
2236
2237#ifdef SOME_UNUSED_FUNCTION
2238/** \#TS(err) - 0a. */
2239DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2240{
2241 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2242}
2243#endif
2244
2245
2246/** \#TS(tr) - 0a. */
2247DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2248{
2249 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2250 pIemCpu->CTX_SUFF(pCtx)->tr, 0);
2251}
2252
2253
2254/** \#NP(err) - 0b. */
2255DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2256{
2257 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2258}
2259
2260
2261/** \#NP(seg) - 0b. */
2262DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2263{
2264 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2265 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2266}
2267
2268
2269/** \#NP(sel) - 0b. */
2270DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2271{
2272 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2273 uSel & ~X86_SEL_RPL, 0);
2274}
2275
2276
2277/** \#SS(seg) - 0c. */
2278DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2279{
2280 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2281 uSel & ~X86_SEL_RPL, 0);
2282}
2283
2284
2285/** \#GP(n) - 0d. */
2286DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2287{
2288 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2289}
2290
2291
2292/** \#GP(0) - 0d. */
2293DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2294{
2295 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2296}
2297
2298
2299/** \#GP(sel) - 0d. */
2300DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2301{
2302 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2303 Sel & ~X86_SEL_RPL, 0);
2304}
2305
2306
2307/** \#GP(0) - 0d. */
2308DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2309{
2310 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2311}
2312
2313
2314/** \#GP(sel) - 0d. */
2315DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2316{
2317 NOREF(iSegReg); NOREF(fAccess);
2318 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2319 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2320}
2321
2322
2323/** \#GP(sel) - 0d. */
2324DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2325{
2326 NOREF(Sel);
2327 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2328}
2329
2330
2331/** \#GP(sel) - 0d. */
2332DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2333{
2334 NOREF(iSegReg); NOREF(fAccess);
2335 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2336}
2337
2338
2339/** \#PF(n) - 0e. */
2340DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2341{
2342 uint16_t uErr;
2343 switch (rc)
2344 {
2345 case VERR_PAGE_NOT_PRESENT:
2346 case VERR_PAGE_TABLE_NOT_PRESENT:
2347 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2348 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2349 uErr = 0;
2350 break;
2351
2352 default:
2353 AssertMsgFailed(("%Rrc\n", rc));
2354 case VERR_ACCESS_DENIED:
2355 uErr = X86_TRAP_PF_P;
2356 break;
2357
2358 /** @todo reserved */
2359 }
2360
2361 if (pIemCpu->uCpl == 3)
2362 uErr |= X86_TRAP_PF_US;
2363
2364 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2365 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2366 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2367 uErr |= X86_TRAP_PF_ID;
2368
2369 /* Note! RW access callers reporting a WRITE protection fault, will clear
2370 the READ flag before calling. So, read-modify-write accesses (RW)
2371 can safely be reported as READ faults. */
2372 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2373 uErr |= X86_TRAP_PF_RW;
2374
2375 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2376 uErr, GCPtrWhere);
2377}
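
/* Example of the error code composition above (hypothetical faults): a ring-3
   write to a present read-only page yields P|US|RW, while a ring-0 read of a
   not-present page yields 0.  Sketch only: */
#if 0
    uint16_t const uErrUserWrite  = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW; /* 0x7 */
    uint16_t const uErrKernelRead = 0;
#endif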
2378
2379
2380/** \#MF(0) - 10. */
2381DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2382{
2383 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2384}
2385
2386
2387/** \#AC(0) - 11. */
2388DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2389{
2390 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2391}
2392
2393
2394/**
2395 * Macro for calling iemCImplRaiseDivideError().
2396 *
2397 * This enables us to add/remove arguments and force different levels of
2398 * inlining as we wish.
2399 *
2400 * @return Strict VBox status code.
2401 */
2402#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2403IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2404{
2405 NOREF(cbInstr);
2406 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2407}
2408
2409
2410/**
2411 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2412 *
2413 * This enables us to add/remove arguments and force different levels of
2414 * inlining as we wish.
2415 *
2416 * @return Strict VBox status code.
2417 */
2418#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2419IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2420{
2421 NOREF(cbInstr);
2422 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2423}
2424
2425
2426/**
2427 * Macro for calling iemCImplRaiseInvalidOpcode().
2428 *
2429 * This enables us to add/remove arguments and force different levels of
2430 * inlining as we wish.
2431 *
2432 * @return Strict VBox status code.
2433 */
2434#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2435IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2436{
2437 NOREF(cbInstr);
2438 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2439}
2440
2441
2442/** @} */
2443
2444
2445/*
2446 *
2447 * Helper routines.
2448 * Helper routines.
2449 * Helper routines.
2450 *
2451 */
2452
2453/**
2454 * Recalculates the effective operand size.
2455 *
2456 * @param pIemCpu The IEM state.
2457 */
2458static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2459{
2460 switch (pIemCpu->enmCpuMode)
2461 {
2462 case IEMMODE_16BIT:
2463 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2464 break;
2465 case IEMMODE_32BIT:
2466 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2467 break;
2468 case IEMMODE_64BIT:
2469 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2470 {
2471 case 0:
2472 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2473 break;
2474 case IEM_OP_PRF_SIZE_OP:
2475 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2476 break;
2477 case IEM_OP_PRF_SIZE_REX_W:
2478 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2479 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2480 break;
2481 }
2482 break;
2483 default:
2484 AssertFailed();
2485 }
2486}
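
/* Illustration of the effective operand size rules above (assumed prefix
   bytes): in 64-bit mode a lone 0x66 prefix selects 16-bit, REX.W selects
   64-bit and also overrides 0x66, and no prefix leaves the default (normally
   32-bit); in 16-bit and 32-bit modes 0x66 simply toggles between the two. */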
2487
2488
2489/**
2490 * Sets the default operand size to 64-bit and recalculates the effective
2491 * operand size.
2492 *
2493 * @param pIemCpu The IEM state.
2494 */
2495static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2496{
2497 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2498 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2499 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2500 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2501 else
2502 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2503}
2504
2505
2506/*
2507 *
2508 * Common opcode decoders.
2509 * Common opcode decoders.
2510 * Common opcode decoders.
2511 *
2512 */
2513#include <iprt/mem.h>
2514
2515/**
2516 * Used to add extra details about a stub case.
2517 * @param pIemCpu The IEM per CPU state.
2518 */
2519static void iemOpStubMsg2(PIEMCPU pIemCpu)
2520{
2521 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2522 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2523 char szRegs[4096];
2524 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2525 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2526 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2527 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2528 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2529 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2530 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2531 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2532 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2533 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2534 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2535 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2536 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2537 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2538 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2539 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2540 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2541 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2542 " efer=%016VR{efer}\n"
2543 " pat=%016VR{pat}\n"
2544 " sf_mask=%016VR{sf_mask}\n"
2545 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2546 " lstar=%016VR{lstar}\n"
2547 " star=%016VR{star} cstar=%016VR{cstar}\n"
2548 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2549 );
2550
2551 char szInstr[256];
2552 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2553 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2554 szInstr, sizeof(szInstr), NULL);
2555
2556 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2557}
2558
2559
2560/** Stubs an opcode. */
2561#define FNIEMOP_STUB(a_Name) \
2562 FNIEMOP_DEF(a_Name) \
2563 { \
2564 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2565 iemOpStubMsg2(pIemCpu); \
2566 RTAssertPanic(); \
2567 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2568 } \
2569 typedef int ignore_semicolon
2570
2571/** Stubs an opcode. */
2572#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2573 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2574 { \
2575 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2576 iemOpStubMsg2(pIemCpu); \
2577 RTAssertPanic(); \
2578 NOREF(a_Name0); \
2579 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2580 } \
2581 typedef int ignore_semicolon
2582
2583
2584
2585/** @name Register Access.
2586 * @{
2587 */
2588
2589/**
2590 * Gets a reference (pointer) to the specified hidden segment register.
2591 *
2592 * @returns Hidden register reference.
2593 * @param pIemCpu The per CPU data.
2594 * @param iSegReg The segment register.
2595 */
2596static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2597{
2598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2599 switch (iSegReg)
2600 {
2601 case X86_SREG_ES: return &pCtx->esHid;
2602 case X86_SREG_CS: return &pCtx->csHid;
2603 case X86_SREG_SS: return &pCtx->ssHid;
2604 case X86_SREG_DS: return &pCtx->dsHid;
2605 case X86_SREG_FS: return &pCtx->fsHid;
2606 case X86_SREG_GS: return &pCtx->gsHid;
2607 }
2608 AssertFailedReturn(NULL);
2609}
2610
2611
2612/**
2613 * Gets a reference (pointer) to the specified segment register (the selector
2614 * value).
2615 *
2616 * @returns Pointer to the selector variable.
2617 * @param pIemCpu The per CPU data.
2618 * @param iSegReg The segment register.
2619 */
2620static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2621{
2622 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2623 switch (iSegReg)
2624 {
2625 case X86_SREG_ES: return &pCtx->es;
2626 case X86_SREG_CS: return &pCtx->cs;
2627 case X86_SREG_SS: return &pCtx->ss;
2628 case X86_SREG_DS: return &pCtx->ds;
2629 case X86_SREG_FS: return &pCtx->fs;
2630 case X86_SREG_GS: return &pCtx->gs;
2631 }
2632 AssertFailedReturn(NULL);
2633}
2634
2635
2636/**
2637 * Fetches the selector value of a segment register.
2638 *
2639 * @returns The selector value.
2640 * @param pIemCpu The per CPU data.
2641 * @param iSegReg The segment register.
2642 */
2643static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
2644{
2645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2646 switch (iSegReg)
2647 {
2648 case X86_SREG_ES: return pCtx->es;
2649 case X86_SREG_CS: return pCtx->cs;
2650 case X86_SREG_SS: return pCtx->ss;
2651 case X86_SREG_DS: return pCtx->ds;
2652 case X86_SREG_FS: return pCtx->fs;
2653 case X86_SREG_GS: return pCtx->gs;
2654 }
2655 AssertFailedReturn(0xffff);
2656}
2657
2658
2659/**
2660 * Gets a reference (pointer) to the specified general register.
2661 *
2662 * @returns Register reference.
2663 * @param pIemCpu The per CPU data.
2664 * @param iReg The general register.
2665 */
2666static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
2667{
2668 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2669 switch (iReg)
2670 {
2671 case X86_GREG_xAX: return &pCtx->rax;
2672 case X86_GREG_xCX: return &pCtx->rcx;
2673 case X86_GREG_xDX: return &pCtx->rdx;
2674 case X86_GREG_xBX: return &pCtx->rbx;
2675 case X86_GREG_xSP: return &pCtx->rsp;
2676 case X86_GREG_xBP: return &pCtx->rbp;
2677 case X86_GREG_xSI: return &pCtx->rsi;
2678 case X86_GREG_xDI: return &pCtx->rdi;
2679 case X86_GREG_x8: return &pCtx->r8;
2680 case X86_GREG_x9: return &pCtx->r9;
2681 case X86_GREG_x10: return &pCtx->r10;
2682 case X86_GREG_x11: return &pCtx->r11;
2683 case X86_GREG_x12: return &pCtx->r12;
2684 case X86_GREG_x13: return &pCtx->r13;
2685 case X86_GREG_x14: return &pCtx->r14;
2686 case X86_GREG_x15: return &pCtx->r15;
2687 }
2688 AssertFailedReturn(NULL);
2689}
2690
2691
2692/**
2693 * Gets a reference (pointer) to the specified 8-bit general register.
2694 *
2695 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
2696 *
2697 * @returns Register reference.
2698 * @param pIemCpu The per CPU data.
2699 * @param iReg The register.
2700 */
2701static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
2702{
2703 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
2704 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
2705
2706 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
2707 if (iReg >= 4)
2708 pu8Reg++;
2709 return pu8Reg;
2710}
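
/* Quick sketch of the high-byte mapping above (hypothetical access, not part
   of the build): without a REX prefix, register numbers 4-7 address AH/CH/DH/BH,
   i.e. byte 1 of registers 0-3, so iReg=5 (CH) resolves to offset 1 into RCX. */
#if 0
    uint8_t const uCh = *((uint8_t *)&pIemCpu->CTX_SUFF(pCtx)->rcx + 1); /* what iemGRegRefU8(pIemCpu, 5) points at */
#endif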
2711
2712
2713/**
2714 * Fetches the value of an 8-bit general register.
2715 *
2716 * @returns The register value.
2717 * @param pIemCpu The per CPU data.
2718 * @param iReg The register.
2719 */
2720static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
2721{
2722 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
2723 return *pbSrc;
2724}
2725
2726
2727/**
2728 * Fetches the value of a 16-bit general register.
2729 *
2730 * @returns The register value.
2731 * @param pIemCpu The per CPU data.
2732 * @param iReg The register.
2733 */
2734static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
2735{
2736 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
2737}
2738
2739
2740/**
2741 * Fetches the value of a 32-bit general register.
2742 *
2743 * @returns The register value.
2744 * @param pIemCpu The per CPU data.
2745 * @param iReg The register.
2746 */
2747static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
2748{
2749 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
2750}
2751
2752
2753/**
2754 * Fetches the value of a 64-bit general register.
2755 *
2756 * @returns The register value.
2757 * @param pIemCpu The per CPU data.
2758 * @param iReg The register.
2759 */
2760static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
2761{
2762 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
2763}
2764
2765
2766/**
2767 * Checks whether the FPU state is in FXSAVE format.
2768 *
2769 * @returns true if it is, false if it's in FNSAVE format.
2770 * @param   pIemCpu             The IEM per CPU data.
2771 */
2772DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
2773{
2774#ifdef RT_ARCH_AMD64
2775 NOREF(pIemCpu);
2776 return true;
2777#else
2778 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
2779 return true;
2780#endif
2781}
2782
2783
2784/**
2785 * Gets the FPU status word.
2786 *
2787 * @returns FPU status word
2788 * @param pIemCpu The per CPU data.
2789 */
2790static uint16_t iemFRegFetchFsw(PIEMCPU pIemCpu)
2791{
2792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2793 uint16_t u16Fsw;
2794 if (iemFRegIsFxSaveFormat(pIemCpu))
2795 u16Fsw = pCtx->fpu.FSW;
2796 else
2797 {
2798 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
2799 u16Fsw = pFpu->FSW;
2800 }
2801 return u16Fsw;
2802}
2803
2804/**
2805 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
2806 *
2807 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2808 * segment limit.
2809 *
2810 * @param pIemCpu The per CPU data.
2811 * @param offNextInstr The offset of the next instruction.
2812 */
2813static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
2814{
2815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2816 switch (pIemCpu->enmEffOpSize)
2817 {
2818 case IEMMODE_16BIT:
2819 {
2820 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2821 if ( uNewIp > pCtx->csHid.u32Limit
2822 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2823 return iemRaiseGeneralProtectionFault0(pIemCpu);
2824 pCtx->rip = uNewIp;
2825 break;
2826 }
2827
2828 case IEMMODE_32BIT:
2829 {
2830 Assert(pCtx->rip <= UINT32_MAX);
2831 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2832
2833 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2834 if (uNewEip > pCtx->csHid.u32Limit)
2835 return iemRaiseGeneralProtectionFault0(pIemCpu);
2836 pCtx->rip = uNewEip;
2837 break;
2838 }
2839
2840 case IEMMODE_64BIT:
2841 {
2842 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2843
2844 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2845 if (!IEM_IS_CANONICAL(uNewRip))
2846 return iemRaiseGeneralProtectionFault0(pIemCpu);
2847 pCtx->rip = uNewRip;
2848 break;
2849 }
2850
2851 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2852 }
2853
2854 return VINF_SUCCESS;
2855}
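
/* Worked example for the relative jump above (hypothetical instruction): a
   two byte "jmp short" EB FD at IP=0x0100 gives offOpcode=2 and
   offNextInstr=-3, so the new IP is 0x0100 + (-3) + 2 = 0x00FF, which is then
   checked against the CS limit (or canonicality in 64-bit mode). */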
2856
2857
2858/**
2859 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
2860 *
2861 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2862 * segment limit.
2863 *
2864 * @returns Strict VBox status code.
2865 * @param pIemCpu The per CPU data.
2866 * @param offNextInstr The offset of the next instruction.
2867 */
2868static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
2869{
2870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2871 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
2872
2873 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
2874 if ( uNewIp > pCtx->csHid.u32Limit
2875 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2876 return iemRaiseGeneralProtectionFault0(pIemCpu);
2877 /** @todo Test 16-bit jump in 64-bit mode. */
2878 pCtx->rip = uNewIp;
2879
2880 return VINF_SUCCESS;
2881}
2882
2883
2884/**
2885 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
2886 *
2887 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2888 * segment limit.
2889 *
2890 * @returns Strict VBox status code.
2891 * @param pIemCpu The per CPU data.
2892 * @param offNextInstr The offset of the next instruction.
2893 */
2894static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
2895{
2896 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2897 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
2898
2899 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
2900 {
2901 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2902
2903 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
2904 if (uNewEip > pCtx->csHid.u32Limit)
2905 return iemRaiseGeneralProtectionFault0(pIemCpu);
2906 pCtx->rip = uNewEip;
2907 }
2908 else
2909 {
2910 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2911
2912 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
2913 if (!IEM_IS_CANONICAL(uNewRip))
2914 return iemRaiseGeneralProtectionFault0(pIemCpu);
2915 pCtx->rip = uNewRip;
2916 }
2917 return VINF_SUCCESS;
2918}
2919
2920
2921/**
2922 * Performs a near jump to the specified address.
2923 *
2924 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2925 * segment limit.
2926 *
2927 * @param pIemCpu The per CPU data.
2928 * @param uNewRip The new RIP value.
2929 */
2930static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
2931{
2932 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2933 switch (pIemCpu->enmEffOpSize)
2934 {
2935 case IEMMODE_16BIT:
2936 {
2937 Assert(uNewRip <= UINT16_MAX);
2938 if ( uNewRip > pCtx->csHid.u32Limit
2939 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
2940 return iemRaiseGeneralProtectionFault0(pIemCpu);
2941 /** @todo Test 16-bit jump in 64-bit mode. */
2942 pCtx->rip = uNewRip;
2943 break;
2944 }
2945
2946 case IEMMODE_32BIT:
2947 {
2948 Assert(uNewRip <= UINT32_MAX);
2949 Assert(pCtx->rip <= UINT32_MAX);
2950 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2951
2952 if (uNewRip > pCtx->csHid.u32Limit)
2953 return iemRaiseGeneralProtectionFault0(pIemCpu);
2954 pCtx->rip = uNewRip;
2955 break;
2956 }
2957
2958 case IEMMODE_64BIT:
2959 {
2960 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2961
2962 if (!IEM_IS_CANONICAL(uNewRip))
2963 return iemRaiseGeneralProtectionFault0(pIemCpu);
2964 pCtx->rip = uNewRip;
2965 break;
2966 }
2967
2968 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2969 }
2970
2971 return VINF_SUCCESS;
2972}
2973
2974
2975/**
2976 * Gets the address of the top of the stack.
2977 *
2978 * @param pCtx The CPU context which SP/ESP/RSP should be
2979 * read.
2980 */
2981DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
2982{
2983 if (pCtx->ssHid.Attr.n.u1Long)
2984 return pCtx->rsp;
2985 if (pCtx->ssHid.Attr.n.u1DefBig)
2986 return pCtx->esp;
2987 return pCtx->sp;
2988}
2989
2990
2991/**
2992 * Updates the RIP/EIP/IP to point to the next instruction.
2993 *
2994 * @param pIemCpu The per CPU data.
2995 * @param cbInstr The number of bytes to add.
2996 */
2997static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
2998{
2999 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3000 switch (pIemCpu->enmCpuMode)
3001 {
3002 case IEMMODE_16BIT:
3003 Assert(pCtx->rip <= UINT16_MAX);
3004 pCtx->eip += cbInstr;
3005 pCtx->eip &= UINT32_C(0xffff);
3006 break;
3007
3008 case IEMMODE_32BIT:
3009 pCtx->eip += cbInstr;
3010 Assert(pCtx->rip <= UINT32_MAX);
3011 break;
3012
3013 case IEMMODE_64BIT:
3014 pCtx->rip += cbInstr;
3015 break;
3016 default: AssertFailed();
3017 }
3018}
3019
3020
3021/**
3022 * Updates the RIP/EIP/IP to point to the next instruction.
3023 *
3024 * @param pIemCpu The per CPU data.
3025 */
3026static void iemRegUpdateRip(PIEMCPU pIemCpu)
3027{
3028 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3029}
3030
3031
3032/**
3033 * Adds to the stack pointer.
3034 *
3035 * @param pCtx The CPU context which SP/ESP/RSP should be
3036 * updated.
3037 * @param cbToAdd The number of bytes to add.
3038 */
3039DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3040{
3041 if (pCtx->ssHid.Attr.n.u1Long)
3042 pCtx->rsp += cbToAdd;
3043 else if (pCtx->ssHid.Attr.n.u1DefBig)
3044 pCtx->esp += cbToAdd;
3045 else
3046 pCtx->sp += cbToAdd;
3047}
3048
3049
3050/**
3051 * Subtracts from the stack pointer.
3052 *
3053 * @param pCtx The CPU context which SP/ESP/RSP should be
3054 * updated.
3055 * @param cbToSub The number of bytes to subtract.
3056 */
3057DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3058{
3059 if (pCtx->ssHid.Attr.n.u1Long)
3060 pCtx->rsp -= cbToSub;
3061 else if (pCtx->ssHid.Attr.n.u1DefBig)
3062 pCtx->esp -= cbToSub;
3063 else
3064 pCtx->sp -= cbToSub;
3065}
3066
3067
3068/**
3069 * Adds to the temporary stack pointer.
3070 *
3071 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3072 * @param cbToAdd The number of bytes to add.
3073 * @param pCtx Where to get the current stack mode.
3074 */
3075DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)
3076{
3077 if (pCtx->ssHid.Attr.n.u1Long)
3078 pTmpRsp->u += cbToAdd;
3079 else if (pCtx->ssHid.Attr.n.u1DefBig)
3080 pTmpRsp->DWords.dw0 += cbToAdd;
3081 else
3082 pTmpRsp->Words.w0 += cbToAdd;
3083}
3084
3085
3086/**
3087 * Subtracts from the temporary stack pointer.
3088 *
3089 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3090 * @param cbToSub The number of bytes to subtract.
3091 * @param pCtx Where to get the current stack mode.
3092 */
3093DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)
3094{
3095 if (pCtx->ssHid.Attr.n.u1Long)
3096 pTmpRsp->u -= cbToSub;
3097 else if (pCtx->ssHid.Attr.n.u1DefBig)
3098 pTmpRsp->DWords.dw0 -= cbToSub;
3099 else
3100 pTmpRsp->Words.w0 -= cbToSub;
3101}
3102
3103
3104/**
3105 * Calculates the effective stack address for a push of the specified size as
3106 * well as the new RSP value (upper bits may be masked).
3107 *
3108 * @returns Effective stack address for the push.
3109 * @param pCtx Where to get the current stack mode.
3110 * @param   cbItem              The size of the stack item to push.
3111 * @param puNewRsp Where to return the new RSP value.
3112 */
3113DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3114{
3115 RTUINT64U uTmpRsp;
3116 RTGCPTR GCPtrTop;
3117 uTmpRsp.u = pCtx->rsp;
3118
3119 if (pCtx->ssHid.Attr.n.u1Long)
3120 GCPtrTop = uTmpRsp.u -= cbItem;
3121 else if (pCtx->ssHid.Attr.n.u1DefBig)
3122 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3123 else
3124 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3125 *puNewRsp = uTmpRsp.u;
3126 return GCPtrTop;
3127}
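
/* Sketch of the masking behaviour above (hypothetical values, not built): with
   a 16-bit stack segment only SP is adjusted, so reserving 4 bytes at
   SP=0x0002 wraps to 0xFFFE while the upper bits of RSP stay untouched. */
#if 0
    RTUINT64U uDemoRsp;
    uDemoRsp.u = UINT64_C(0x0000000000010002);
    uDemoRsp.Words.w0 -= 4;                     /* -> 0x000000000001fffe */
#endif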
3128
3129
3130/**
3131 * Gets the current stack pointer and calculates the value after a pop of the
3132 * specified size.
3133 *
3134 * @returns Current stack pointer.
3135 * @param pCtx Where to get the current stack mode.
3136 * @param cbItem The size of the stack item to pop.
3137 * @param puNewRsp Where to return the new RSP value.
3138 */
3139DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3140{
3141 RTUINT64U uTmpRsp;
3142 RTGCPTR GCPtrTop;
3143 uTmpRsp.u = pCtx->rsp;
3144
3145 if (pCtx->ssHid.Attr.n.u1Long)
3146 {
3147 GCPtrTop = uTmpRsp.u;
3148 uTmpRsp.u += cbItem;
3149 }
3150 else if (pCtx->ssHid.Attr.n.u1DefBig)
3151 {
3152 GCPtrTop = uTmpRsp.DWords.dw0;
3153 uTmpRsp.DWords.dw0 += cbItem;
3154 }
3155 else
3156 {
3157 GCPtrTop = uTmpRsp.Words.w0;
3158 uTmpRsp.Words.w0 += cbItem;
3159 }
3160 *puNewRsp = uTmpRsp.u;
3161 return GCPtrTop;
3162}
3163
3164
3165/**
3166 * Calculates the effective stack address for a push of the specified size as
3167 * well as the new temporary RSP value (upper bits may be masked).
3168 *
3169 * @returns Effective stack address for the push.
3170 * @param   pTmpRsp             The temporary stack pointer.  This is updated.
3171 * @param   cbItem              The size of the stack item to push.
3172 * @param   pCtx                Where to get the current stack mode.
3173 */
3174DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3175{
3176 RTGCPTR GCPtrTop;
3177
3178 if (pCtx->ssHid.Attr.n.u1Long)
3179 GCPtrTop = pTmpRsp->u -= cbItem;
3180 else if (pCtx->ssHid.Attr.n.u1DefBig)
3181 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3182 else
3183 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3184 return GCPtrTop;
3185}
3186
3187
3188/**
3189 * Gets the effective stack address for a pop of the specified size and
3190 * calculates and updates the temporary RSP.
3191 *
3192 * @returns Current stack pointer.
3193 * @param pTmpRsp The temporary stack pointer. This is updated.
3194 * @param pCtx Where to get the current stack mode.
3195 * @param cbItem The size of the stack item to pop.
3196 */
3197DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3198{
3199 RTGCPTR GCPtrTop;
3200 if (pCtx->ssHid.Attr.n.u1Long)
3201 {
3202 GCPtrTop = pTmpRsp->u;
3203 pTmpRsp->u += cbItem;
3204 }
3205 else if (pCtx->ssHid.Attr.n.u1DefBig)
3206 {
3207 GCPtrTop = pTmpRsp->DWords.dw0;
3208 pTmpRsp->DWords.dw0 += cbItem;
3209 }
3210 else
3211 {
3212 GCPtrTop = pTmpRsp->Words.w0;
3213 pTmpRsp->Words.w0 += cbItem;
3214 }
3215 return GCPtrTop;
3216}
3217
3218
3219/**
3220 * Checks if an Intel CPUID feature bit is set.
3221 *
3222 * @returns true / false.
3223 *
3224 * @param pIemCpu The IEM per CPU data.
3225 * @param fEdx The EDX bit to test, or 0 if ECX.
3226 * @param fEcx The ECX bit to test, or 0 if EDX.
3227 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3228 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3229 */
3230static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3231{
3232 uint32_t uEax, uEbx, uEcx, uEdx;
3233 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3234 return (fEcx && (uEcx & fEcx))
3235 || (fEdx && (uEdx & fEdx));
3236}
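
/* Hypothetical usage of the CPUID helpers above (the feature constant is
   assumed to come from iprt/x86.h), e.g. to gate an SSE2 instruction: */
#if 0
    if (!iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_SSE2, 0))
        return IEMOP_RAISE_INVALID_OPCODE();
#endif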
3237
3238
3239/**
3240 * Checks if an AMD CPUID feature bit is set.
3241 *
3242 * @returns true / false.
3243 *
3244 * @param pIemCpu The IEM per CPU data.
3245 * @param fEdx The EDX bit to test, or 0 if ECX.
3246 * @param fEcx The ECX bit to test, or 0 if EDX.
3247 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3248 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3249 */
3250static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3251{
3252 uint32_t uEax, uEbx, uEcx, uEdx;
3253 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3254 return (fEcx && (uEcx & fEcx))
3255 || (fEdx && (uEdx & fEdx));
3256}
3257
3258/** @} */
3259
3260
3261/** @name FPU access and helpers.
3262 *
3263 * @{
3264 */
3265
3266
3267/**
3268 * Hook for preparing to use the host FPU.
3269 *
3270 * This is necessary in ring-0 and raw-mode context.
3271 *
3272 * @param pIemCpu The IEM per CPU data.
3273 */
3274DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3275{
3276#ifdef IN_RING3
3277 NOREF(pIemCpu);
3278#else
3279# error "Implement me"
3280#endif
3281}
3282
3283
3284/**
3285 * Stores a QNaN value into a FPU register.
3286 *
3287 * @param pReg Pointer to the register.
3288 */
3289DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3290{
3291 pReg->au32[0] = UINT32_C(0x00000000);
3292 pReg->au32[1] = UINT32_C(0xc0000000);
3293 pReg->au16[4] = UINT16_C(0xffff);
3294}
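
/* The pattern stored above is the x87 "QNaN floating-point indefinite": sign
   and all exponent bits set (au16[4] = 0xffff), integer bit plus the top
   fraction bit set (au32[1] = 0xc0000000) and the rest zero - the value a
   masked invalid-operation exception leaves behind. */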
3295
3296
3297/**
3298 * Updates the FOP, FPU.CS and FPUIP registers.
3299 *
3300 * @param pIemCpu The IEM per CPU data.
3301 * @param pCtx The CPU context.
3302 */
3303DECLINLINE(void) iemFpuUpdateOpcodeAndIP(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3304{
3305 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3306 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3307    /** @todo FPU.CS and FPUIP need to be kept separately. */
3308 pCtx->fpu.CS = pCtx->cs;
3309 pCtx->fpu.FPUIP = pCtx->rip;
3310}
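
/* Worked example of the FOP encoding above (hypothetical opcode bytes): for
   D8 C1 (FADD ST(0),ST(1)) the ModR/M byte 0xC1 supplies the low eight bits
   and the low three bits of the preceding 0xD8 byte supply the top, giving
   the 11-bit FOP value 0x0C1; DC C1 would give 0x4C1. */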
3311
3312
3313/**
3314 * Updates the FPU.DS and FPUDP registers.
3315 *
3316 * @param pIemCpu The IEM per CPU data.
3317 * @param pCtx The CPU context.
3318 * @param iEffSeg The effective segment register.
3319 * @param GCPtrEff The effective address relative to @a iEffSeg.
3320 */
3321DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3322{
3323 RTSEL sel;
3324 switch (iEffSeg)
3325 {
3326 case X86_SREG_DS: sel = pCtx->ds; break;
3327 case X86_SREG_SS: sel = pCtx->ss; break;
3328 case X86_SREG_CS: sel = pCtx->cs; break;
3329 case X86_SREG_ES: sel = pCtx->es; break;
3330 case X86_SREG_FS: sel = pCtx->fs; break;
3331 case X86_SREG_GS: sel = pCtx->gs; break;
3332 default:
3333 AssertMsgFailed(("%d\n", iEffSeg));
3334 sel = pCtx->ds;
3335 }
3336    /** @todo FPU.DS and FPUDP need to be kept separately. */
3337 pCtx->fpu.DS = sel;
3338 pCtx->fpu.FPUDP = GCPtrEff;
3339}
3340
3341
3342/**
3343 * Rotates the stack registers in the push direction.
3344 *
3345 * @param pCtx The CPU context.
3346 * @remarks This is a complete waste of time, but fxsave stores the registers in
3347 * stack order.
3348 */
3349DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3350{
3351 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3352 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3353 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3354 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3355 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3356 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3357 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3358 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3359 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3360}
3361
3362
3363/**
3364 * Rotates the stack registers in the pop direction.
3365 *
3366 * @param pCtx The CPU context.
3367 * @remarks This is a complete waste of time, but fxsave stores the registers in
3368 * stack order.
3369 */
3370DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3371{
3372 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3373 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3374 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3375 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3376 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3377 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3378 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3379 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3380 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3381}
3382
3383
3384#if 0
3385/**
3386 *
3387 * @param pIemCpu The IEM per CPU data.
3388 * @param pResult The FPU operation result to push.
3389 * @param pCtx The CPU context.
3390 * @param iDstReg The destination register,
3391 * @param cStackAdj The stack adjustment on successful operation.
3392 * Note that this is an unsigned value.
3393 * @param fFlags Flags.
3394 */
3395static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx, uint16_t iDstReg,
3396                             uint8_t cStackAdj)
3397{
3398 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3399 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3400
3401 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3402 if (!(RT_BIT(iNewTop) & pCtx->fpu.FTW))
3403 {
3404 /* No stack error. */
3405 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3406 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3407 if (!fXcpts)
3408 {
3409 /* No unmasked exceptions, just store the result. */
3410 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3411 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3412 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3413 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3414 }
3415 else
3416 {
3417 AssertFailed();
3418 }
3419
3420 }
3421 else if (pCtx->fpu.FCW & X86_FCW_IM)
3422 {
3423 /* Masked stack overflow. */
3424 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3425 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3426 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3427 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3428 }
3429 else
3430 {
3431 /* Stack overflow exception. */
3432 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3433 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3434 return;
3435 }
3436
3437 iemFpuRotateStackPush(pCtx);
3438}
3439
3440
3441/**
3442 * Writes a FPU result to the FPU stack after inspecting the resulting
3443 * statuses.
3444 *
3445 * @param pIemCpu The IEM per CPU data.
3446 * @param pResult The FPU operation result to push.
3447 * @param iReg The stack relative FPU register number.
3448 */
3449static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iReg)
3450{
3451 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3452 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3453
3454 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iReg) & X86_FSW_TOP_SMASK;
3455
3456 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3457 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3458 if (!fXcpts)
3459 {
3460 /* No unmasked exceptions, just store the result. */
3461 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3462 pCtx->fpu.FSW |= (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3463 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3464 pCtx->fpu.aRegs[iReg].r80 = pResult->r80Result;
3465 }
3466 else
3467 {
3468 AssertFailed();
3469 }
3470}
3471#endif
3472
3473
3474/**
3475 * Pushes a FPU result onto the FPU stack after inspecting the resulting
3476 * statuses.
3477 *
3478 * @param pIemCpu The IEM per CPU data.
3479 * @param pResult The FPU operation result to push.
3480 */
3481static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3482{
3483 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3484 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3485
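 /* TOP + 7 (mod 8) equals TOP - 1, i.e. the register that becomes the new top of stack after the push. */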
3486 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
3487 if (!(RT_BIT(iNewTop) & pCtx->fpu.FTW))
3488 {
3489 /* No stack error. */
3490 uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
3491 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_DM | X86_FCW_ZM | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM));
3492 if (!fXcpts)
3493 {
3494 /* No unmasked exceptions, just store the result. */
3495 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3496 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
3497 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3498 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3499 }
3500 else
3501 {
3502 AssertFailed();
3503 }
3504
3505 }
3506 else if (pCtx->fpu.FCW & X86_FCW_IM)
3507 {
3508 /* Masked stack overflow. */
3509 pCtx->fpu.FSW &= X86_FSW_TOP_MASK | X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3510 pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
3511 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3512 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3513 }
3514 else
3515 {
3516 /* Stack overflow exception. */
3517 pCtx->fpu.FSW &= X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3;
3518 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
3519 return;
3520 }
3521
3522 iemFpuRotateStackPush(pCtx);
3523}
3524
3525
3526/**
3527 * Pushes a FPU result onto the FPU stack after inspecting the resulting
3528 * statuses, and sets FPU.DS and FPUDP.
3529 *
3530 * @param pIemCpu The IEM per CPU data.
3531 * @param pResult The FPU operation result to push.
3532 * @param iEffSeg The effective segment register.
3533 * @param GCPtrEff The effective address relative to @a iEffSeg.
3534 */
3535static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3536{
3537 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3538 iemFpuPushResult(pIemCpu, pResult);
3539}
3540
3541
3542/**
3543 * Stores a result in a FPU register and updates the FSW and FTW.
3544 *
3545 * @param pIemCpu The IEM per CPU data.
3546 * @param pResult The result to store.
3547 * @param iStReg Which FPU register to store it in.
3548 * @param pCtx The CPU context.
3549 */
3550static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3551{
3552 Assert(iStReg < 8);
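 /* Convert the ST(iStReg) relative register number into the absolute register index used for the FTW bit. */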
3553 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3554 pCtx->fpu.FSW &= X86_FSW_C_MASK;
3555 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3556 pCtx->fpu.FTW |= RT_BIT(iReg);
3557 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3558}
3559
3560
3561/**
3562 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3563 * FOP.
3564 *
3565 * @param pIemCpu The IEM per CPU data.
3566 * @param pResult The result to store.
3567 * @param iStReg Which FPU register to store it in.
3569 */
3570static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3571{
3572 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3573 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3574 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3575}
3576
3577
3578/**
3579 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3580 * FPUDP, and FPUDS.
3581 *
3582 * @param pIemCpu The IEM per CPU data.
3583 * @param pResult The result to store.
3584 * @param iStReg Which FPU register to store it in.
3585 * @param iEffSeg The effective segment register.
 * @param GCPtrEff The effective address relative to @a iEffSeg.
3586 */
3587static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3588{
3589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3590 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3591 iemFpuUpdateOpcodeAndIP(pIemCpu, pCtx);
3592 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3593}
3594
3595
3596/** @} */
3597
3598
3599/** @name Memory access.
3600 *
3601 * @{
3602 */
3603
3604
3605/**
3606 * Checks if the given segment can be written to, raising the appropriate
3607 * exception if not.
3608 *
3609 * @returns VBox strict status code.
3610 *
3611 * @param pIemCpu The IEM per CPU data.
3612 * @param pHid Pointer to the hidden register.
3613 * @param iSegReg The register number.
3614 */
3615static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3616{
3617 if (!pHid->Attr.n.u1Present)
3618 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3619
3620 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
3621 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3622 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3623 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
3624
3625 /** @todo DPL/RPL/CPL? */
3626
3627 return VINF_SUCCESS;
3628}
3629
3630
3631/**
3632 * Checks if the given segment can be read from, raising the appropriate
3633 * exception if not.
3634 *
3635 * @returns VBox strict status code.
3636 *
3637 * @param pIemCpu The IEM per CPU data.
3638 * @param pHid Pointer to the hidden register.
3639 * @param iSegReg The register number.
3640 */
3641static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
3642{
3643 if (!pHid->Attr.n.u1Present)
3644 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
3645
3646 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
3647 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
3648 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
3649
3650 /** @todo DPL/RPL/CPL? */
3651
3652 return VINF_SUCCESS;
3653}
3654
3655
3656/**
3657 * Applies the segment limit, base and attributes.
3658 *
3659 * This may raise a \#GP or \#SS.
3660 *
3661 * @returns VBox strict status code.
3662 *
3663 * @param pIemCpu The IEM per CPU data.
3664 * @param fAccess The kind of access which is being performed.
3665 * @param iSegReg The index of the segment register to apply.
3666 * This is UINT8_MAX if none (for IDT, GDT, LDT,
3667 * TSS, ++).
 * @param cbMem The number of bytes to access.
3668 * @param pGCPtrMem Pointer to the guest memory address to apply
3669 * segmentation to. Input and output parameter.
3670 */
3671static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
3672 size_t cbMem, PRTGCPTR pGCPtrMem)
3673{
3674 if (iSegReg == UINT8_MAX)
3675 return VINF_SUCCESS;
3676
3677 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
3678 switch (pIemCpu->enmCpuMode)
3679 {
3680 case IEMMODE_16BIT:
3681 case IEMMODE_32BIT:
3682 {
3683 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
3684 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
3685
3686 Assert(pSel->Attr.n.u1Present);
3687 Assert(pSel->Attr.n.u1DescType);
3688 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
3689 {
3690 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3691 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
3692 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3693
3694 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3695 {
3696 /** @todo CPL check. */
3697 }
3698
3699 /*
3700 * There are two kinds of data selectors, normal and expand down.
3701 */
3702 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
3703 {
3704 if ( GCPtrFirst32 > pSel->u32Limit
3705 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3706 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3707
3708 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3709 }
3710 else
3711 {
3712 /** @todo implement expand down segments. */
3713 AssertFailed(/** @todo implement this */);
3714 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
3715 }
3716 }
3717 else
3718 {
3719
3720 /*
3721 * Code selectors can usually be read through; writing is
3722 * only permitted in real and V8086 mode.
3723 */
3724 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3725 || ( (fAccess & IEM_ACCESS_TYPE_READ)
3726 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
3727 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
3728 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
3729
3730 if ( GCPtrFirst32 > pSel->u32Limit
3731 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
3732 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
3733
3734 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3735 {
3736 /** @todo CPL check. */
3737 }
3738
3739 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
3740 }
3741 return VINF_SUCCESS;
3742 }
3743
3744 case IEMMODE_64BIT:
3745 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
3746 *pGCPtrMem += pSel->u64Base;
3747 return VINF_SUCCESS;
3748
3749 default:
3750 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
3751 }
3752}
3753
3754
3755/**
3756 * Translates a virtual address to a physical address and checks if we
3757 * can access the page as specified.
3758 *
3759 * @param pIemCpu The IEM per CPU data.
3760 * @param GCPtrMem The virtual address.
3761 * @param fAccess The intended access.
3762 * @param pGCPhysMem Where to return the physical address.
3763 */
3764static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
3765 PRTGCPHYS pGCPhysMem)
3766{
3767 /** @todo Need a different PGM interface here. We're currently using
3768 * generic / REM interfaces. this won't cut it for R0 & RC. */
3769 RTGCPHYS GCPhys;
3770 uint64_t fFlags;
3771 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
3772 if (RT_FAILURE(rc))
3773 {
3774 /** @todo Check unassigned memory in unpaged mode. */
3775 /** @todo Reserved bits in page tables. Requires new PGM interface. */
3776 *pGCPhysMem = NIL_RTGCPHYS;
3777 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
3778 }
3779
3780 /* If the page is writable, user accessible and does not have the no-exec
3781 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
3782 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
3783 {
3784 /* Write to read only memory? */
3785 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
3786 && !(fFlags & X86_PTE_RW)
3787 && ( pIemCpu->uCpl != 0
3788 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
3789 {
3790 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page\n", GCPtrMem));
3791 *pGCPhysMem = NIL_RTGCPHYS;
3792 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
3793 }
3794
3795 /* Kernel memory accessed by userland? */
3796 if ( !(fFlags & X86_PTE_US)
3797 && pIemCpu->uCpl == 3
3798 && !(fAccess & IEM_ACCESS_WHAT_SYS))
3799 {
3800 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page\n", GCPtrMem));
3801 *pGCPhysMem = NIL_RTGCPHYS;
3802 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
3803 }
3804
3805 /* Executing non-executable memory? */
3806 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
3807 && (fFlags & X86_PTE_PAE_NX)
3808 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
3809 {
3810 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX\n", GCPtrMem));
3811 *pGCPhysMem = NIL_RTGCPHYS;
3812 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
3813 VERR_ACCESS_DENIED);
3814 }
3815 }
3816
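 /* PGMGstGetPage returns the page frame address; put the offset within the page back. */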
3817 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
3818 *pGCPhysMem = GCPhys;
3819 return VINF_SUCCESS;
3820}
3821
3822
3823
3824/**
3825 * Maps a physical page.
3826 *
3827 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
3828 * @param pIemCpu The IEM per CPU data.
3829 * @param GCPhysMem The physical address.
3830 * @param fAccess The intended access.
3831 * @param ppvMem Where to return the mapping address.
3832 */
3833static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)
3834{
3835#ifdef IEM_VERIFICATION_MODE
3836 /* Force the alternative path so we can ignore writes. */
3837 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
3838 return VERR_PGM_PHYS_TLB_CATCH_ALL;
3839#endif
3840
3841 /*
3842 * If we can map the page without trouble, do block processing
3843 * until the end of the current page.
3844 */
3845 /** @todo need some better API. */
3846 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
3847 GCPhysMem,
3848 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
3849 ppvMem);
3850}
3851
3852
3853/**
3854 * Unmap a page previously mapped by iemMemPageMap.
3855 *
3856 * This is currently a dummy function.
3857 *
3858 * @param pIemCpu The IEM per CPU data.
3859 * @param GCPhysMem The physical address.
3860 * @param fAccess The intended access.
3861 * @param pvMem What iemMemPageMap returned.
3862 */
3863DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
3864{
3865 NOREF(pIemCpu);
3866 NOREF(GCPhysMem);
3867 NOREF(fAccess);
3868 NOREF(pvMem);
3869}
3870
3871
3872/**
3873 * Looks up a memory mapping entry.
3874 *
3875 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
3876 * @param pIemCpu The IEM per CPU data.
3877 * @param pvMem The memory address.
3878 * @param fAccess The kind of access to match.
3879 */
3880DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
3881{
3882 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
3883 if ( pIemCpu->aMemMappings[0].pv == pvMem
3884 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3885 return 0;
3886 if ( pIemCpu->aMemMappings[1].pv == pvMem
3887 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3888 return 1;
3889 if ( pIemCpu->aMemMappings[2].pv == pvMem
3890 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
3891 return 2;
3892 return VERR_NOT_FOUND;
3893}
3894
3895
3896/**
3897 * Finds a free memmap entry when using iNextMapping doesn't work.
3898 *
3899 * @returns Memory mapping index, 1024 on failure.
3900 * @param pIemCpu The IEM per CPU data.
3901 */
3902static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
3903{
3904 /*
3905 * The easy case.
3906 */
3907 if (pIemCpu->cActiveMappings == 0)
3908 {
3909 pIemCpu->iNextMapping = 1;
3910 return 0;
3911 }
3912
3913 /* There should be enough mappings for all instructions. */
3914 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
3915
3916 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
3917 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
3918 return i;
3919
3920 AssertFailedReturn(1024);
3921}
3922
3923
3924/**
3925 * Commits a bounce buffer that needs writing back and unmaps it.
3926 *
3927 * @returns Strict VBox status code.
3928 * @param pIemCpu The IEM per CPU data.
3929 * @param iMemMap The index of the buffer to commit.
3930 */
3931static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
3932{
3933 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
3934 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
3935
3936 /*
3937 * Do the writing.
3938 */
3939 int rc;
3940 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
3941 && !IEM_VERIFICATION_ENABLED(pIemCpu))
3942 {
3943 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3944 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3945 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
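 /* PGMPhysWrite goes through any access handlers, while the simple variant writes straight to RAM. */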
3946 if (!pIemCpu->fByPassHandlers)
3947 {
3948 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3949 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3950 pbBuf,
3951 cbFirst);
3952 if (cbSecond && rc == VINF_SUCCESS)
3953 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
3954 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3955 pbBuf + cbFirst,
3956 cbSecond);
3957 }
3958 else
3959 {
3960 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3961 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
3962 pbBuf,
3963 cbFirst);
3964 if (cbSecond && rc == VINF_SUCCESS)
3965 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
3966 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
3967 pbBuf + cbFirst,
3968 cbSecond);
3969 }
3970 }
3971 else
3972 rc = VINF_SUCCESS;
3973
3974#ifdef IEM_VERIFICATION_MODE
3975 /*
3976 * Record the write(s).
3977 */
3978 if (!pIemCpu->fNoRem)
3979 {
3980 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
3981 if (pEvtRec)
3982 {
3983 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3984 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
3985 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
3986 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
3987 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
3988 *pIemCpu->ppIemEvtRecNext = pEvtRec;
3989 }
3990 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
3991 {
3992 pEvtRec = iemVerifyAllocRecord(pIemCpu);
3993 if (pEvtRec)
3994 {
3995 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
3996 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
3997 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
3998 memcpy(pEvtRec->u.RamWrite.ab,
3999 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4000 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4001 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4002 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4003 }
4004 }
4005 }
4006#endif
4007
4008 /*
4009 * Free the mapping entry.
4010 */
4011 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4012 Assert(pIemCpu->cActiveMappings != 0);
4013 pIemCpu->cActiveMappings--;
4014 return rc;
4015}
4016
4017
4018/**
4019 * iemMemMap worker that deals with a request crossing pages.
4020 */
4021static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4022 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4023{
4024 /*
4025 * Do the address translations.
4026 */
4027 RTGCPHYS GCPhysFirst;
4028 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4029 if (rcStrict != VINF_SUCCESS)
4030 return rcStrict;
4031
4032 RTGCPHYS GCPhysSecond;
4033 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4034 if (rcStrict != VINF_SUCCESS)
4035 return rcStrict;
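 /* The second translation was done on the last byte of the request; mask down to the start of that page. */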
4036 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4037
4038 /*
4039 * Read in the current memory content if it's a read or execute access.
4040 */
4041 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4042 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4043 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4044
4045 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
4046 {
4047 int rc;
4048 if (!pIemCpu->fByPassHandlers)
4049 {
4050 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4051 if (rc != VINF_SUCCESS)
4052 return rc;
4053 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
4054 if (rc != VINF_SUCCESS)
4055 return rc;
4056 }
4057 else
4058 {
4059 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
4060 if (rc != VINF_SUCCESS)
4061 return rc;
4062 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
4063 if (rc != VINF_SUCCESS)
4064 return rc;
4065 }
4066
4067#ifdef IEM_VERIFICATION_MODE
4068 if (!pIemCpu->fNoRem)
4069 {
4070 /*
4071 * Record the reads.
4072 */
4073 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4074 if (pEvtRec)
4075 {
4076 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4077 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4078 pEvtRec->u.RamRead.cb = cbFirstPage;
4079 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4080 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4081 }
4082 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4083 if (pEvtRec)
4084 {
4085 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4086 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
4087 pEvtRec->u.RamRead.cb = cbSecondPage;
4088 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4089 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4090 }
4091 }
4092#endif
4093 }
4094#ifdef VBOX_STRICT
4095 else
4096 memset(pbBuf, 0xcc, cbMem);
4097#endif
4098#ifdef VBOX_STRICT
4099 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4100 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4101#endif
4102
4103 /*
4104 * Commit the bounce buffer entry.
4105 */
4106 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4107 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
4108 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
4109 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
4110 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
4111 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4112 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4113 pIemCpu->cActiveMappings++;
4114
4115 *ppvMem = pbBuf;
4116 return VINF_SUCCESS;
4117}
4118
4119
4120/**
4121 * iemMemMap worker that deals with iemMemPageMap failures.
4122 */
4123static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
4124 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
4125{
4126 /*
4127 * Filter out conditions we can handle and the ones which shouldn't happen.
4128 */
4129 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE
4130 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
4131 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
4132 {
4133 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
4134 return rcMap;
4135 }
4136 pIemCpu->cPotentialExits++;
4137
4138 /*
4139 * Read in the current memory content if it's a read or execute access.
4140 */
4141 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4142 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))
4143 {
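 /* Unassigned memory: there is nothing to read, so fake all-ones content. */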
4144 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
4145 memset(pbBuf, 0xff, cbMem);
4146 else
4147 {
4148 int rc;
4149 if (!pIemCpu->fByPassHandlers)
4150 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
4151 else
4152 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
4153 if (rc != VINF_SUCCESS)
4154 return rc;
4155 }
4156
4157#ifdef IEM_VERIFICATION_MODE
4158 if (!pIemCpu->fNoRem)
4159 {
4160 /*
4161 * Record the read.
4162 */
4163 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4164 if (pEvtRec)
4165 {
4166 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
4167 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
4168 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
4169 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4170 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4171 }
4172 }
4173#endif
4174 }
4175#ifdef VBOX_STRICT
4176 else
4177 memset(pbBuf, 0xcc, cbMem);
4178#endif
4179#ifdef VBOX_STRICT
4180 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
4181 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
4182#endif
4183
4184 /*
4185 * Commit the bounce buffer entry.
4186 */
4187 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
4188 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
4189 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
4190 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
4191 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
4192 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
4193 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
4194 pIemCpu->cActiveMappings++;
4195
4196 *ppvMem = pbBuf;
4197 return VINF_SUCCESS;
4198}
4199
4200
4201
4202/**
4203 * Maps the specified guest memory for the given kind of access.
4204 *
4205 * This may be using bounce buffering of the memory if it's crossing a page
4206 * boundary or if there is an access handler installed for any of it. Because
4207 * of lock prefix guarantees, we're in for some extra clutter when this
4208 * happens.
4209 *
4210 * This may raise a \#GP, \#SS, \#PF or \#AC.
4211 *
4212 * @returns VBox strict status code.
4213 *
4214 * @param pIemCpu The IEM per CPU data.
4215 * @param ppvMem Where to return the pointer to the mapped
4216 * memory.
4217 * @param cbMem The number of bytes to map. This is usually 1,
4218 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
4219 * string operations it can be up to a page.
4220 * @param iSegReg The index of the segment register to use for
4221 * this access. The base and limits are checked.
4222 * Use UINT8_MAX to indicate that no segmentation
4223 * is required (for IDT, GDT and LDT accesses).
4224 * @param GCPtrMem The address of the guest memory.
4225 * @param fAccess How the memory is being accessed. The
4226 * IEM_ACCESS_TYPE_XXX bit is used to figure out
4227 * how to map the memory, while the
4228 * IEM_ACCESS_WHAT_XXX bit is used when raising
4229 * exceptions.
4230 */
4231static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
4232{
4233 /*
4234 * Check the input and figure out which mapping entry to use.
4235 */
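 /* The 512 byte case matches the size of the FXSAVE/FXRSTOR image. */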
4236 Assert(cbMem <= 32 || cbMem == 512);
4237 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
4238
4239 unsigned iMemMap = pIemCpu->iNextMapping;
4240 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
4241 {
4242 iMemMap = iemMemMapFindFree(pIemCpu);
4243 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
4244 }
4245
4246 /*
4247 * Map the memory, checking that we can actually access it. If something
4248 * slightly complicated happens, fall back on bounce buffering.
4249 */
4250 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
4251 if (rcStrict != VINF_SUCCESS)
4252 return rcStrict;
4253
4254 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
4255 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
4256
4257 RTGCPHYS GCPhysFirst;
4258 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
4259 if (rcStrict != VINF_SUCCESS)
4260 return rcStrict;
4261
4262 void *pvMem;
4263 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);
4264 if (rcStrict != VINF_SUCCESS)
4265 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
4266
4267 /*
4268 * Fill in the mapping table entry.
4269 */
4270 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
4271 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
4272 pIemCpu->iNextMapping = iMemMap + 1;
4273 pIemCpu->cActiveMappings++;
4274
4275 *ppvMem = pvMem;
4276 return VINF_SUCCESS;
4277}
4278
4279
4280/**
4281 * Commits the guest memory if bounce buffered and unmaps it.
4282 *
4283 * @returns Strict VBox status code.
4284 * @param pIemCpu The IEM per CPU data.
4285 * @param pvMem The mapping.
4286 * @param fAccess The kind of access.
4287 */
4288static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4289{
4290 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
4291 AssertReturn(iMemMap >= 0, iMemMap);
4292
4293 /*
4294 * If it's bounce buffered, we need to write back the buffer.
4295 */
4296 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4297 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
4298 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
4299
4300 /* Free the entry. */
4301 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4302 Assert(pIemCpu->cActiveMappings != 0);
4303 pIemCpu->cActiveMappings--;
4304 return VINF_SUCCESS;
4305}
4306
4307
4308/**
4309 * Fetches a data byte.
4310 *
4311 * @returns Strict VBox status code.
4312 * @param pIemCpu The IEM per CPU data.
4313 * @param pu8Dst Where to return the byte.
4314 * @param iSegReg The index of the segment register to use for
4315 * this access. The base and limits are checked.
4316 * @param GCPtrMem The address of the guest memory.
4317 */
4318static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4319{
4320 /* The lazy approach for now... */
4321 uint8_t const *pu8Src;
4322 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4323 if (rc == VINF_SUCCESS)
4324 {
4325 *pu8Dst = *pu8Src;
4326 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4327 }
4328 return rc;
4329}
4330
4331
4332/**
4333 * Fetches a data word.
4334 *
4335 * @returns Strict VBox status code.
4336 * @param pIemCpu The IEM per CPU data.
4337 * @param pu16Dst Where to return the word.
4338 * @param iSegReg The index of the segment register to use for
4339 * this access. The base and limits are checked.
4340 * @param GCPtrMem The address of the guest memory.
4341 */
4342static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4343{
4344 /* The lazy approach for now... */
4345 uint16_t const *pu16Src;
4346 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4347 if (rc == VINF_SUCCESS)
4348 {
4349 *pu16Dst = *pu16Src;
4350 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
4351 }
4352 return rc;
4353}
4354
4355
4356/**
4357 * Fetches a data dword.
4358 *
4359 * @returns Strict VBox status code.
4360 * @param pIemCpu The IEM per CPU data.
4361 * @param pu32Dst Where to return the dword.
4362 * @param iSegReg The index of the segment register to use for
4363 * this access. The base and limits are checked.
4364 * @param GCPtrMem The address of the guest memory.
4365 */
4366static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4367{
4368 /* The lazy approach for now... */
4369 uint32_t const *pu32Src;
4370 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4371 if (rc == VINF_SUCCESS)
4372 {
4373 *pu32Dst = *pu32Src;
4374 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
4375 }
4376 return rc;
4377}
4378
4379
4380#ifdef SOME_UNUSED_FUNCTION
4381/**
4382 * Fetches a data dword and sign extends it to a qword.
4383 *
4384 * @returns Strict VBox status code.
4385 * @param pIemCpu The IEM per CPU data.
4386 * @param pu64Dst Where to return the sign extended value.
4387 * @param iSegReg The index of the segment register to use for
4388 * this access. The base and limits are checked.
4389 * @param GCPtrMem The address of the guest memory.
4390 */
4391static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4392{
4393 /* The lazy approach for now... */
4394 int32_t const *pi32Src;
4395 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4396 if (rc == VINF_SUCCESS)
4397 {
4398 *pu64Dst = *pi32Src;
4399 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
4400 }
4401#ifdef __GNUC__ /* warning: GCC may be a royal pain */
4402 else
4403 *pu64Dst = 0;
4404#endif
4405 return rc;
4406}
4407#endif
4408
4409
4410/**
4411 * Fetches a data qword.
4412 *
4413 * @returns Strict VBox status code.
4414 * @param pIemCpu The IEM per CPU data.
4415 * @param pu64Dst Where to return the qword.
4416 * @param iSegReg The index of the segment register to use for
4417 * this access. The base and limits are checked.
4418 * @param GCPtrMem The address of the guest memory.
4419 */
4420static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
4421{
4422 /* The lazy approach for now... */
4423 uint64_t const *pu64Src;
4424 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
4425 if (rc == VINF_SUCCESS)
4426 {
4427 *pu64Dst = *pu64Src;
4428 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
4429 }
4430 return rc;
4431}
4432
4433
4434/**
4435 * Fetches a descriptor register (lgdt, lidt).
4436 *
4437 * @returns Strict VBox status code.
4438 * @param pIemCpu The IEM per CPU data.
4439 * @param pcbLimit Where to return the limit.
4440 * @param pGCPtrBase Where to return the base.
4441 * @param iSegReg The index of the segment register to use for
4442 * this access. The base and limits are checked.
4443 * @param GCPtrMem The address of the guest memory.
4444 * @param enmOpSize The effective operand size.
4445 */
4446static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
4447 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
4448{
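 /* Map the 2 byte limit plus 8, 4 or 3 base bytes; 16-bit operand size only fetches a 24-bit base. */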
4449 uint8_t const *pu8Src;
4450 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
4451 (void **)&pu8Src,
4452 enmOpSize == IEMMODE_64BIT
4453 ? 2 + 8
4454 : enmOpSize == IEMMODE_32BIT
4455 ? 2 + 4
4456 : 2 + 3,
4457 iSegReg,
4458 GCPtrMem,
4459 IEM_ACCESS_DATA_R);
4460 if (rcStrict == VINF_SUCCESS)
4461 {
4462 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
4463 switch (enmOpSize)
4464 {
4465 case IEMMODE_16BIT:
4466 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
4467 break;
4468 case IEMMODE_32BIT:
4469 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
4470 break;
4471 case IEMMODE_64BIT:
4472 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
4473 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
4474 break;
4475
4476 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4477 }
4478 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
4479 }
4480 return rcStrict;
4481}
4482
4483
4484
4485/**
4486 * Stores a data byte.
4487 *
4488 * @returns Strict VBox status code.
4489 * @param pIemCpu The IEM per CPU data.
4490 * @param iSegReg The index of the segment register to use for
4491 * this access. The base and limits are checked.
4492 * @param GCPtrMem The address of the guest memory.
4493 * @param u8Value The value to store.
4494 */
4495static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
4496{
4497 /* The lazy approach for now... */
4498 uint8_t *pu8Dst;
4499 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4500 if (rc == VINF_SUCCESS)
4501 {
4502 *pu8Dst = u8Value;
4503 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
4504 }
4505 return rc;
4506}
4507
4508
4509/**
4510 * Stores a data word.
4511 *
4512 * @returns Strict VBox status code.
4513 * @param pIemCpu The IEM per CPU data.
4514 * @param iSegReg The index of the segment register to use for
4515 * this access. The base and limits are checked.
4516 * @param GCPtrMem The address of the guest memory.
4517 * @param u16Value The value to store.
4518 */
4519static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
4520{
4521 /* The lazy approach for now... */
4522 uint16_t *pu16Dst;
4523 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4524 if (rc == VINF_SUCCESS)
4525 {
4526 *pu16Dst = u16Value;
4527 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
4528 }
4529 return rc;
4530}
4531
4532
4533/**
4534 * Stores a data dword.
4535 *
4536 * @returns Strict VBox status code.
4537 * @param pIemCpu The IEM per CPU data.
4538 * @param iSegReg The index of the segment register to use for
4539 * this access. The base and limits are checked.
4540 * @param GCPtrMem The address of the guest memory.
4541 * @param u32Value The value to store.
4542 */
4543static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
4544{
4545 /* The lazy approach for now... */
4546 uint32_t *pu32Dst;
4547 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4548 if (rc == VINF_SUCCESS)
4549 {
4550 *pu32Dst = u32Value;
4551 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
4552 }
4553 return rc;
4554}
4555
4556
4557/**
4558 * Stores a data qword.
4559 *
4560 * @returns Strict VBox status code.
4561 * @param pIemCpu The IEM per CPU data.
4562 * @param iSegReg The index of the segment register to use for
4563 * this access. The base and limits are checked.
4564 * @param GCPtrMem The address of the guest memory.
4565 * @param u64Value The value to store.
4566 */
4567static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
4568{
4569 /* The lazy approach for now... */
4570 uint64_t *pu64Dst;
4571 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
4572 if (rc == VINF_SUCCESS)
4573 {
4574 *pu64Dst = u64Value;
4575 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
4576 }
4577 return rc;
4578}
4579
4580
4581/**
4582 * Pushes a word onto the stack.
4583 *
4584 * @returns Strict VBox status code.
4585 * @param pIemCpu The IEM per CPU data.
4586 * @param u16Value The value to push.
4587 */
4588static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
4589{
4590 /* Decrement the stack pointer. */
4591 uint64_t uNewRsp;
4592 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4593 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
4594
4595 /* Write the word the lazy way. */
4596 uint16_t *pu16Dst;
4597 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4598 if (rc == VINF_SUCCESS)
4599 {
4600 *pu16Dst = u16Value;
4601 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4602 }
4603
4604 /* Commit the new RSP value unless an access handler made trouble. */
4605 if (rc == VINF_SUCCESS)
4606 pCtx->rsp = uNewRsp;
4607
4608 return rc;
4609}
4610
4611
4612/**
4613 * Pushes a dword onto the stack.
4614 *
4615 * @returns Strict VBox status code.
4616 * @param pIemCpu The IEM per CPU data.
4617 * @param u32Value The value to push.
4618 */
4619static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
4620{
4621 /* Decrement the stack pointer. */
4622 uint64_t uNewRsp;
4623 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4624 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
4625
4626 /* Write the word the lazy way. */
4627 uint32_t *pu32Dst;
4628 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4629 if (rc == VINF_SUCCESS)
4630 {
4631 *pu32Dst = u32Value;
4632 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4633 }
4634
4635 /* Commit the new RSP value unless an access handler made trouble. */
4636 if (rc == VINF_SUCCESS)
4637 pCtx->rsp = uNewRsp;
4638
4639 return rc;
4640}
4641
4642
4643/**
4644 * Pushes a qword onto the stack.
4645 *
4646 * @returns Strict VBox status code.
4647 * @param pIemCpu The IEM per CPU data.
4648 * @param u64Value The value to push.
4649 */
4650static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
4651{
4652 /* Decrement the stack pointer. */
4653 uint64_t uNewRsp;
4654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4655 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
4656
4657 /* Write the word the lazy way. */
4658 uint64_t *pu64Dst;
4659 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4660 if (rc == VINF_SUCCESS)
4661 {
4662 *pu64Dst = u64Value;
4663 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4664 }
4665
4666 /* Commit the new RSP value unless an access handler made trouble. */
4667 if (rc == VINF_SUCCESS)
4668 pCtx->rsp = uNewRsp;
4669
4670 return rc;
4671}
4672
4673
4674/**
4675 * Pops a word from the stack.
4676 *
4677 * @returns Strict VBox status code.
4678 * @param pIemCpu The IEM per CPU data.
4679 * @param pu16Value Where to store the popped value.
4680 */
4681static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
4682{
4683 /* Increment the stack pointer. */
4684 uint64_t uNewRsp;
4685 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4686 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
4687
4688 /* Read the word the lazy way. */
4689 uint16_t const *pu16Src;
4690 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4691 if (rc == VINF_SUCCESS)
4692 {
4693 *pu16Value = *pu16Src;
4694 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4695
4696 /* Commit the new RSP value. */
4697 if (rc == VINF_SUCCESS)
4698 pCtx->rsp = uNewRsp;
4699 }
4700
4701 return rc;
4702}
4703
4704
4705/**
4706 * Pops a dword from the stack.
4707 *
4708 * @returns Strict VBox status code.
4709 * @param pIemCpu The IEM per CPU data.
4710 * @param pu32Value Where to store the popped value.
4711 */
4712static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
4713{
4714 /* Increment the stack pointer. */
4715 uint64_t uNewRsp;
4716 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4717 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
4718
4719 /* Read the word the lazy way. */
4720 uint32_t const *pu32Src;
4721 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4722 if (rc == VINF_SUCCESS)
4723 {
4724 *pu32Value = *pu32Src;
4725 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4726
4727 /* Commit the new RSP value. */
4728 if (rc == VINF_SUCCESS)
4729 pCtx->rsp = uNewRsp;
4730 }
4731
4732 return rc;
4733}
4734
4735
4736/**
4737 * Pops a qword from the stack.
4738 *
4739 * @returns Strict VBox status code.
4740 * @param pIemCpu The IEM per CPU data.
4741 * @param pu64Value Where to store the popped value.
4742 */
4743static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
4744{
4745 /* Increment the stack pointer. */
4746 uint64_t uNewRsp;
4747 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4748 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
4749
4750 /* Read the word the lazy way. */
4751 uint64_t const *pu64Src;
4752 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4753 if (rc == VINF_SUCCESS)
4754 {
4755 *pu64Value = *pu64Src;
4756 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4757
4758 /* Commit the new RSP value. */
4759 if (rc == VINF_SUCCESS)
4760 pCtx->rsp = uNewRsp;
4761 }
4762
4763 return rc;
4764}
4765
4766
4767/**
4768 * Pushes a word onto the stack, using a temporary stack pointer.
4769 *
4770 * @returns Strict VBox status code.
4771 * @param pIemCpu The IEM per CPU data.
4772 * @param u16Value The value to push.
4773 * @param pTmpRsp Pointer to the temporary stack pointer.
4774 */
4775static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
4776{
4777 /* Decrement the stack pointer. */
4778 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4779 RTUINT64U NewRsp = *pTmpRsp;
4780 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
4781
4782 /* Write the word the lazy way. */
4783 uint16_t *pu16Dst;
4784 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4785 if (rc == VINF_SUCCESS)
4786 {
4787 *pu16Dst = u16Value;
4788 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
4789 }
4790
4791 /* Commit the new RSP value unless an access handler made trouble. */
4792 if (rc == VINF_SUCCESS)
4793 *pTmpRsp = NewRsp;
4794
4795 return rc;
4796}
4797
4798
4799/**
4800 * Pushes a dword onto the stack, using a temporary stack pointer.
4801 *
4802 * @returns Strict VBox status code.
4803 * @param pIemCpu The IEM per CPU data.
4804 * @param u32Value The value to push.
4805 * @param pTmpRsp Pointer to the temporary stack pointer.
4806 */
4807static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
4808{
4809 /* Decrement the stack pointer. */
4810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4811 RTUINT64U NewRsp = *pTmpRsp;
4812 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
4813
4814 /* Write the word the lazy way. */
4815 uint32_t *pu32Dst;
4816 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4817 if (rc == VINF_SUCCESS)
4818 {
4819 *pu32Dst = u32Value;
4820 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
4821 }
4822
4823 /* Commit the new RSP value unless an access handler made trouble. */
4824 if (rc == VINF_SUCCESS)
4825 *pTmpRsp = NewRsp;
4826
4827 return rc;
4828}
4829
4830
4831#ifdef SOME_UNUSED_FUNCTION
4832/**
4833 * Pushes a qword onto the stack, using a temporary stack pointer.
4834 *
4835 * @returns Strict VBox status code.
4836 * @param pIemCpu The IEM per CPU data.
4837 * @param u64Value The value to push.
4838 * @param pTmpRsp Pointer to the temporary stack pointer.
4839 */
4840static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
4841{
4842 /* Decrement the stack pointer. */
4843 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4844 RTUINT64U NewRsp = *pTmpRsp;
4845 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
4846
4847 /* Write the word the lazy way. */
4848 uint64_t *pu64Dst;
4849 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4850 if (rc == VINF_SUCCESS)
4851 {
4852 *pu64Dst = u64Value;
4853 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
4854 }
4855
4856 /* Commit the new RSP value unless an access handler made trouble. */
4857 if (rc == VINF_SUCCESS)
4858 *pTmpRsp = NewRsp;
4859
4860 return rc;
4861}
4862#endif
4863
4864
4865/**
4866 * Pops a word from the stack, using a temporary stack pointer.
4867 *
4868 * @returns Strict VBox status code.
4869 * @param pIemCpu The IEM per CPU data.
4870 * @param pu16Value Where to store the popped value.
4871 * @param pTmpRsp Pointer to the temporary stack pointer.
4872 */
4873static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
4874{
4875 /* Increment the stack pointer. */
4876 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4877 RTUINT64U NewRsp = *pTmpRsp;
4878 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
4879
4880 /* Read the word the lazy way. */
4881 uint16_t const *pu16Src;
4882 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4883 if (rc == VINF_SUCCESS)
4884 {
4885 *pu16Value = *pu16Src;
4886 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
4887
4888 /* Commit the new RSP value. */
4889 if (rc == VINF_SUCCESS)
4890 *pTmpRsp = NewRsp;
4891 }
4892
4893 return rc;
4894}
4895
4896
4897/**
4898 * Pops a dword from the stack, using a temporary stack pointer.
4899 *
4900 * @returns Strict VBox status code.
4901 * @param pIemCpu The IEM per CPU data.
4902 * @param pu32Value Where to store the popped value.
4903 * @param pTmpRsp Pointer to the temporary stack pointer.
4904 */
4905static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
4906{
4907 /* Increment the stack pointer. */
4908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4909 RTUINT64U NewRsp = *pTmpRsp;
4910 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
4911
4912 /* Read the word the lazy way. */
4913 uint32_t const *pu32Src;
4914 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4915 if (rc == VINF_SUCCESS)
4916 {
4917 *pu32Value = *pu32Src;
4918 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
4919
4920 /* Commit the new RSP value. */
4921 if (rc == VINF_SUCCESS)
4922 *pTmpRsp = NewRsp;
4923 }
4924
4925 return rc;
4926}
4927
4928
4929/**
4930 * Pops a qword from the stack, using a temporary stack pointer.
4931 *
4932 * @returns Strict VBox status code.
4933 * @param pIemCpu The IEM per CPU data.
4934 * @param pu64Value Where to store the popped value.
4935 * @param pTmpRsp Pointer to the temporary stack pointer.
4936 */
4937static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
4938{
4939 /* Increment the stack pointer. */
4940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4941 RTUINT64U NewRsp = *pTmpRsp;
4942 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
4943
4944 /* Read the word the lazy way. */
4945 uint64_t const *pu64Src;
4946 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
4947 if (rcStrict == VINF_SUCCESS)
4948 {
4949 *pu64Value = *pu64Src;
4950 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
4951
4952 /* Commit the new RSP value. */
4953 if (rcStrict == VINF_SUCCESS)
4954 *pTmpRsp = NewRsp;
4955 }
4956
4957 return rcStrict;
4958}
4959
4960
4961/**
4962 * Begin a special stack push (used by interrupt, exceptions and such).
4963 *
4964 * This will raise #SS or #PF if appropriate.
4965 *
4966 * @returns Strict VBox status code.
4967 * @param pIemCpu The IEM per CPU data.
4968 * @param cbMem The number of bytes to push onto the stack.
4969 * @param ppvMem Where to return the pointer to the stack memory.
4970 * As with the other memory functions this could be
4971 * direct access or bounce buffered access, so
4972 * don't commit register until the commit call
4973 * succeeds.
4974 * @param puNewRsp Where to return the new RSP value. This must be
4975 * passed unchanged to
4976 * iemMemStackPushCommitSpecial().
4977 */
4978static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
4979{
4980 Assert(cbMem < UINT8_MAX);
4981 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4982 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
4983 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
4984}
4985
4986
4987/**
4988 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
4989 *
4990 * This will update the rSP.
4991 *
4992 * @returns Strict VBox status code.
4993 * @param pIemCpu The IEM per CPU data.
4994 * @param pvMem The pointer returned by
4995 * iemMemStackPushBeginSpecial().
4996 * @param uNewRsp The new RSP value returned by
4997 * iemMemStackPushBeginSpecial().
4998 */
4999static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
5000{
5001 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
5002 if (rcStrict == VINF_SUCCESS)
5003 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5004 return rcStrict;
5005}
5006
5007
5008/**
5009 * Begin a special stack pop (used by iret, retf and such).
5010 *
5011 * This will raise \#SS or \#PF if appropriate.
5012 *
5013 * @returns Strict VBox status code.
5014 * @param pIemCpu The IEM per CPU data.
5015 * @param cbMem The number of bytes to pop off the stack.
5016 * @param ppvMem Where to return the pointer to the stack memory.
5017 * @param puNewRsp Where to return the new RSP value. This must be
5018 * passed unchanged to
5019 * iemMemStackPopCommitSpecial() or applied
5020 * manually if iemMemStackPopDoneSpecial() is used.
5021 */
5022static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5023{
5024 Assert(cbMem < UINT8_MAX);
5025 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5026 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
5027 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5028}
5029
5030
5031/**
5032 * Continue a special stack pop (used by iret).
5033 *
5034 * This will raise \#SS or \#PF if appropriate.
5035 *
5036 * @returns Strict VBox status code.
5037 * @param pIemCpu The IEM per CPU data.
5038 * @param cbMem The number of bytes to pop off the stack.
5039 * @param ppvMem Where to return the pointer to the stack memory.
5040 * @param puNewRsp Where to return the new RSP value. This must be
5041 * passed unchanged to
5042 * iemMemStackPopCommitSpecial() or applied
5043 * manually if iemMemStackPopDoneSpecial() is used.
5044 */
5045static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
5046{
5047 Assert(cbMem < UINT8_MAX);
5048 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5049 RTUINT64U NewRsp;
5050 NewRsp.u = *puNewRsp;
5051 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5052 *puNewRsp = NewRsp.u;
5053 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5054}
5055
5056
5057/**
5058 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
5059 *
5060 * This will update the rSP.
5061 *
5062 * @returns Strict VBox status code.
5063 * @param pIemCpu The IEM per CPU data.
5064 * @param pvMem The pointer returned by
5065 * iemMemStackPopBeginSpecial().
5066 * @param uNewRsp The new RSP value returned by
5067 * iemMemStackPopBeginSpecial().
5068 */
5069static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
5070{
5071 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5072 if (rcStrict == VINF_SUCCESS)
5073 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
5074 return rcStrict;
5075}
5076
5077
5078/**
5079 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
5080 * iemMemStackPopContinueSpecial).
5081 *
5082 * The caller will manually commit the rSP.
5083 *
5084 * @returns Strict VBox status code.
5085 * @param pIemCpu The IEM per CPU data.
5086 * @param pvMem The pointer returned by
5087 * iemMemStackPopBeginSpecial() or
5088 * iemMemStackPopContinueSpecial().
5089 */
5090static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
5091{
5092 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
5093}
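/*
 * Minimal usage sketch of the special stack pop helpers above (illustrative
 * only, not copied from the real callers in IEMAllCImpl.cpp.h), popping a
 * 16-bit IP:CS:FLAGS frame:
 *
 *     uint16_t const *pu16Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const  uNewIp    = pu16Frame[0];
 *     uint16_t const  uNewCs    = pu16Frame[1];
 *     uint16_t const  uNewFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */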
5094
5095
5096/**
5097 * Fetches a system table dword.
5098 *
5099 * @returns Strict VBox status code.
5100 * @param pIemCpu The IEM per CPU data.
5101 * @param pu32Dst Where to return the dword.
5102 * @param iSegReg The index of the segment register to use for
5103 * this access. The base and limits are checked.
5104 * @param GCPtrMem The address of the guest memory.
5105 */
5106static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5107{
5108 /* The lazy approach for now... */
5109 uint32_t const *pu32Src;
5110 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5111 if (rc == VINF_SUCCESS)
5112 {
5113 *pu32Dst = *pu32Src;
5114 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
5115 }
5116 return rc;
5117}
5118
5119
5120/**
5121 * Fetches a system table qword.
5122 *
5123 * @returns Strict VBox status code.
5124 * @param pIemCpu The IEM per CPU data.
5125 * @param pu64Dst Where to return the qword.
5126 * @param iSegReg The index of the segment register to use for
5127 * this access. The base and limits are checked.
5128 * @param GCPtrMem The address of the guest memory.
5129 */
5130static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5131{
5132 /* The lazy approach for now... */
5133 uint64_t const *pu64Src;
5134 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
5135 if (rc == VINF_SUCCESS)
5136 {
5137 *pu64Dst = *pu64Src;
5138 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
5139 }
5140 return rc;
5141}
5142
5143
5144/**
5145 * Fetches a descriptor table entry.
5146 *
5147 * @returns Strict VBox status code.
5148 * @param pIemCpu The IEM per CPU.
5149 * @param pDesc Where to return the descriptor table entry.
5150 * @param uSel The selector which table entry to fetch.
5151 */
5152static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
5153{
5154 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5155
5156 /** @todo did the 286 require all 8 bytes to be accessible? */
5157 /*
5158 * Get the selector table base and check bounds.
5159 */
5160 RTGCPTR GCPtrBase;
5161 if (uSel & X86_SEL_LDT)
5162 {
5163 if ( !pCtx->ldtrHid.Attr.n.u1Present
5164 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )
5165 {
5166 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
5167 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));
5168 /** @todo is this the right exception? */
5169 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5170 }
5171
5172 Assert(pCtx->ldtrHid.Attr.n.u1Present);
5173 GCPtrBase = pCtx->ldtrHid.u64Base;
5174 }
5175 else
5176 {
5177 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
5178 {
5179 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
5180 /** @todo is this the right exception? */
5181 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5182 }
5183 GCPtrBase = pCtx->gdtr.pGdt;
5184 }
5185
5186 /*
5187 * Read the legacy descriptor and maybe the long mode extensions if
5188 * required.
5189 */
5190 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5191 if (rcStrict == VINF_SUCCESS)
5192 {
5193 if ( !IEM_IS_LONG_MODE(pIemCpu)
5194 || pDesc->Legacy.Gen.u1DescType)
5195 pDesc->Long.au64[1] = 0;
5196 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
5197            rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
5198 else
5199 {
5200 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
5201 /** @todo is this the right exception? */
5202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
5203 }
5204 }
5205 return rcStrict;
5206}
5207
5208
5209/**
5210 * Marks the selector descriptor as accessed (only non-system descriptors).
5211 *
5212 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
5213 * will therefore skip the limit checks.
5214 *
5215 * @returns Strict VBox status code.
5216 * @param pIemCpu The IEM per CPU.
5217 * @param uSel The selector.
5218 */
5219static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
5220{
5221 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5222
5223 /*
5224 * Get the selector table base and calculate the entry address.
5225 */
5226 RTGCPTR GCPtr = uSel & X86_SEL_LDT
5227 ? pCtx->ldtrHid.u64Base
5228 : pCtx->gdtr.pGdt;
5229 GCPtr += uSel & X86_SEL_MASK;
5230
5231 /*
5232 * ASMAtomicBitSet will assert if the address is misaligned, so do some
5233 * ugly stuff to avoid this. This will make sure it's an atomic access
5234     * as well as more or less remove any question about 8-bit or 32-bit accesses.
5235 */
5236 VBOXSTRICTRC rcStrict;
5237 uint32_t volatile *pu32;
5238 if ((GCPtr & 3) == 0)
5239 {
5240        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
5241 GCPtr += 2 + 2;
5242 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5243 if (rcStrict != VINF_SUCCESS)
5244 return rcStrict;
5245        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
5246 }
5247 else
5248 {
5249 /* The misaligned GDT/LDT case, map the whole thing. */
5250 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
5251 if (rcStrict != VINF_SUCCESS)
5252 return rcStrict;
5253 switch ((uintptr_t)pu32 & 3)
5254 {
5255 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
5256 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
5257 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
5258 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
5259 }
5260 }
5261
5262 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
5263}
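/*
 * Worked note on the aligned case above: the accessed flag is bit 0 of the
 * descriptor type field, i.e. bit 40 of the 8-byte descriptor.  After
 * advancing GCPtr by 2 + 2 bytes the mapped dword holds descriptor bits
 * 32..63, so bit 40 lands on bit 8 of that dword - hence ASMAtomicBitSet(pu32, 8).
 */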
5264
5265/** @} */
5266
5267
5268/*
5269 * Include the C/C++ implementation of the instructions.
5270 */
5271#include "IEMAllCImpl.cpp.h"
5272
5273
5274
5275/** @name "Microcode" macros.
5276 *
5277 * The idea is that we should be able to use the same code both to interpret
5278 * instructions and, eventually, to recompile them.  Thus this obfuscation.
5279 *
5280 * @{
5281 */
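/*
 * Illustrative sketch of how a decoder function is expected to string these
 * microcode macros together (not an actual handler from this file; the
 * X86_GREG_xAX register index is assumed to come from x86.h):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */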
5282#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
5283#define IEM_MC_END() }
5284#define IEM_MC_PAUSE() do {} while (0)
5285#define IEM_MC_CONTINUE() do {} while (0)
5286
5287/** Internal macro. */
5288#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
5289 do \
5290 { \
5291 VBOXSTRICTRC rcStrict2 = a_Expr; \
5292 if (rcStrict2 != VINF_SUCCESS) \
5293 return rcStrict2; \
5294 } while (0)
5295
5296#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
5297#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
5298#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
5299#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
5300#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
5301#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
5302#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
5303
5304#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
5305#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
5306 do { \
5307 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
5308 return iemRaiseDeviceNotAvailable(pIemCpu); \
5309 } while (0)
5310#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
5311 do { \
5312 if (iemFRegFetchFsw(pIemCpu) & X86_FSW_ES) \
5313 return iemRaiseMathFault(pIemCpu); \
5314 } while (0)
5315#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
5316 do { \
5317 if (pIemCpu->uCpl != 0) \
5318 return iemRaiseGeneralProtectionFault0(pIemCpu); \
5319 } while (0)
5320
5321
5322#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
5323#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
5324#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
5325#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
5326#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
5327#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
5328#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
5329 uint32_t a_Name; \
5330 uint32_t *a_pName = &a_Name
5331#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
5332 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
5333
5334#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
5335#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
5336
5337#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5338#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5339#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5340#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
5341#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5342#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5343#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
5344#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5345#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5346#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
5347#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5348#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
5349#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5350#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
5351#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
5352#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
5353#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
5354#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5355#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5356#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
5357#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5358#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
5359#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
5360#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5361#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5362#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = iemFRegFetchFsw(pIemCpu)
5363
5364#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
5365#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
5366#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
5367#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
5368#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
5369#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
5370#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
5371#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
5372#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
5373#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
5374
5375#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
5376#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
5377/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
5378 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
5379#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
5380#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
5381#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
5382#define IEM_MC_REF_FPUREG_R80(a_pr80Dst, a_iSt) (a_pr80Dst) = &(pIemCpu)->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80
5383
5384#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
5385#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
5386#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
5387 do { \
5388 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5389 *pu32Reg += (a_u32Value); \
5390        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
5391 } while (0)
5392#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
5393
5394#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
5395#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
5396#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
5397 do { \
5398 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5399 *pu32Reg -= (a_u32Value); \
5400        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
5401 } while (0)
5402#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
5403
5404#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
5405#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
5406#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
5407#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
5408#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
5409#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
5410#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
5411
5412#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
5413#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
5414#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5415#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
5416
5417#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
5418#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
5419#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
5420
5421#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
5422#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5423
5424#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
5425#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
5426#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
5427
5428#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
5429#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
5430#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
5431
5432#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
5433
5434#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
5435
5436#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
5437#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
5438#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
5439 do { \
5440 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5441 *pu32Reg &= (a_u32Value); \
5442        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
5443 } while (0)
5444#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
5445
5446#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
5447#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
5448#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
5449 do { \
5450 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
5451 *pu32Reg |= (a_u32Value); \
5452        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
5453 } while (0)
5454#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
5455
5456
5457#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
5458#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
5459#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
5460
5461
5462
5463#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
5464 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
5465#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
5466 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
5467#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
5468 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
5469
5470#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5471 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
5472#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5473 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5474
5475#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5476 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
5477#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5478 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5479
5480#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5481 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5482
5483#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5484 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
5485#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
5486 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
5487
5488#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
5489 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
5490#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
5491 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
5492#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
5493    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
5494
5495
5496#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5497 do { \
5498 uint8_t u8Tmp; \
5499 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5500 (a_u16Dst) = u8Tmp; \
5501 } while (0)
5502#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5503 do { \
5504 uint8_t u8Tmp; \
5505 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5506 (a_u32Dst) = u8Tmp; \
5507 } while (0)
5508#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5509 do { \
5510 uint8_t u8Tmp; \
5511 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5512 (a_u64Dst) = u8Tmp; \
5513 } while (0)
5514#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5515 do { \
5516 uint16_t u16Tmp; \
5517 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5518 (a_u32Dst) = u16Tmp; \
5519 } while (0)
5520#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5521 do { \
5522 uint16_t u16Tmp; \
5523 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5524 (a_u64Dst) = u16Tmp; \
5525 } while (0)
5526#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5527 do { \
5528 uint32_t u32Tmp; \
5529 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5530 (a_u64Dst) = u32Tmp; \
5531 } while (0)
5532
5533#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
5534 do { \
5535 uint8_t u8Tmp; \
5536 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5537 (a_u16Dst) = (int8_t)u8Tmp; \
5538 } while (0)
5539#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5540 do { \
5541 uint8_t u8Tmp; \
5542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5543 (a_u32Dst) = (int8_t)u8Tmp; \
5544 } while (0)
5545#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5546 do { \
5547 uint8_t u8Tmp; \
5548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
5549 (a_u64Dst) = (int8_t)u8Tmp; \
5550 } while (0)
5551#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
5552 do { \
5553 uint16_t u16Tmp; \
5554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5555 (a_u32Dst) = (int16_t)u16Tmp; \
5556 } while (0)
5557#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5558 do { \
5559 uint16_t u16Tmp; \
5560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
5561 (a_u64Dst) = (int16_t)u16Tmp; \
5562 } while (0)
5563#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
5564 do { \
5565 uint32_t u32Tmp; \
5566 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
5567 (a_u64Dst) = (int32_t)u32Tmp; \
5568 } while (0)
5569
5570#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
5571 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
5572#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
5573 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
5574#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
5575 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
5576#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
5577 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
5578
5579#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
5580 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
5581
5582#define IEM_MC_PUSH_U16(a_u16Value) \
5583 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
5584#define IEM_MC_PUSH_U32(a_u32Value) \
5585 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
5586#define IEM_MC_PUSH_U64(a_u64Value) \
5587 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
5588
5589#define IEM_MC_POP_U16(a_pu16Value) \
5590 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
5591#define IEM_MC_POP_U32(a_pu32Value) \
5592 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
5593#define IEM_MC_POP_U64(a_pu64Value) \
5594 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
5595
5596/** Maps guest memory for direct or bounce buffered access.
5597 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5598 * @remarks May return.
5599 */
5600#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
5601 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5602
5603/** Maps guest memory for direct or bounce buffered access.
5604 * The purpose is to pass it to an operand implementation, thus the a_iArg.
5605 * @remarks May return.
5606 */
5607#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
5608 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
5609
5610/** Commits the memory and unmaps the guest memory.
5611 * @remarks May return.
5612 */
5613#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
5614 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
5615
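/*
 * Illustrative sketch of the map/modify/commit pairing for a read-modify-write
 * word operand (assumed handler shape; pfnAImpl, iGRegSrc and the
 * IEM_ACCESS_DATA_RW flag from IEMInternal.h are stand-ins for the sake of
 * the example):
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint16_t *,  pu16Dst,          0);
 *     IEM_MC_ARG(uint16_t,    u16Src,           1);
 *     IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,  2);
 *     IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst);
 *
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
 *     IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnAImpl, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */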
5616/** Calculate effective address from R/M. */
5617#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
5618 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
5619
5620#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
5621#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
5622#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
5623#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
5624#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
5625
5626/**
5627 * Defers the rest of the instruction emulation to a C implementation routine
5628 * and returns, only taking the standard parameters.
5629 *
5630 * @param a_pfnCImpl The pointer to the C routine.
5631 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5632 */
5633#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5634
5635/**
5636 * Defers the rest of instruction emulation to a C implementation routine and
5637 * returns, taking one argument in addition to the standard ones.
5638 *
5639 * @param a_pfnCImpl The pointer to the C routine.
5640 * @param a0 The argument.
5641 */
5642#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5643
5644/**
5645 * Defers the rest of the instruction emulation to a C implementation routine
5646 * and returns, taking two arguments in addition to the standard ones.
5647 *
5648 * @param a_pfnCImpl The pointer to the C routine.
5649 * @param a0 The first extra argument.
5650 * @param a1 The second extra argument.
5651 */
5652#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5653
5654/**
5655 * Defers the rest of the instruction emulation to a C implementation routine
5656 * and returns, taking three arguments in addition to the standard ones.
5657 *
5658 * @param a_pfnCImpl The pointer to the C routine.
5659 * @param a0 The first extra argument.
5660 * @param a1 The second extra argument.
5661 * @param a2 The third extra argument.
5662 */
5663#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5664
5665/**
5666 * Defers the rest of the instruction emulation to a C implementation routine
5667 * and returns, taking five arguments in addition to the standard ones.
5668 *
5669 * @param a_pfnCImpl The pointer to the C routine.
5670 * @param a0 The first extra argument.
5671 * @param a1 The second extra argument.
5672 * @param a2 The third extra argument.
5673 * @param a3 The fourth extra argument.
5674 * @param a4 The fifth extra argument.
5675 */
5676#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
5677
5678/**
5679 * Defers the entire instruction emulation to a C implementation routine and
5680 * returns, only taking the standard parameters.
5681 *
5682 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
5683 *
5684 * @param a_pfnCImpl The pointer to the C routine.
5685 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
5686 */
5687#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
5688
5689/**
5690 * Defers the entire instruction emulation to a C implementation routine and
5691 * returns, taking one argument in addition to the standard ones.
5692 *
5693 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
5694 *
5695 * @param a_pfnCImpl The pointer to the C routine.
5696 * @param a0 The argument.
5697 */
5698#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
5699
5700/**
5701 * Defers the entire instruction emulation to a C implementation routine and
5702 * returns, taking two arguments in addition to the standard ones.
5703 *
5704 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
5705 *
5706 * @param a_pfnCImpl The pointer to the C routine.
5707 * @param a0 The first extra argument.
5708 * @param a1 The second extra argument.
5709 */
5710#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
5711
5712/**
5713 * Defers the entire instruction emulation to a C implementation routine and
5714 * returns, taking three arguments in addition to the standard ones.
5715 *
5716 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
5717 *
5718 * @param a_pfnCImpl The pointer to the C routine.
5719 * @param a0 The first extra argument.
5720 * @param a1 The second extra argument.
5721 * @param a2 The third extra argument.
5722 */
5723#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
5724
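/*
 * Illustrative sketch of a decoder function deferring the whole instruction
 * to a C worker (iemCImpl_example is a placeholder name, not something this
 * file defines):
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_HLP_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 *     }
 */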
5725/**
5726 * Calls a FPU assembly implementation taking two visible arguments.
5727 *
5728 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
5729 *
5730 * @param a_pfnAImpl Pointer to the assembly FPU routine.
5731 * @param a0 The first extra argument.
5732 * @param a1 The second extra argument.
5733 */
5734#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
5735 do { \
5736 iemFpuPrepareUsage(pIemCpu); \
5737 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
5738 } while (0)
5739
5740/**
5741 * Calls a FPU assembly implementation taking three visible arguments.
5742 *
5743 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
5744 *
5745 * @param a_pfnAImpl Pointer to the assembly FPU routine.
5746 * @param a0 The first extra argument.
5747 * @param a1 The second extra argument.
5748 * @param a2 The third extra argument.
5749 */
5750#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
5751 do { \
5752 iemFpuPrepareUsage(pIemCpu); \
5753 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
5754 } while (0)
5755
5756/** Pushes FPU result onto the stack. */
5757#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
5758 iemFpuPushResult(pIemCpu, &a_FpuData)
5759/** Pushes FPU result onto the stack and sets the FPUDP. */
5760#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
5761 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
5762
5763/** Stores FPU result in a stack register. */
5764#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
5765 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
5766/** Stores FPU result in a stack register and sets the FPUDP. */
5767#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
5768 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
5769
5770
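/*
 * Illustrative sketch of an ST(0)/ST(i) arithmetic instruction (e.g. fdiv)
 * combining the FPU macros above.  The IEMFPURESULT/PIEMFPURESULT types, the
 * iemAImpl_fdiv_r80_by_r80 worker and the iStReg local are assumptions made
 * for the example only:
 *
 *     IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *     IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *     IEM_MC_REF_FPUREG_R80(pr80Value1, 0);
 *     IEM_MC_REF_FPUREG_R80(pr80Value2, iStReg);
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fdiv_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ADVANCE_RIP();
 */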
5771#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
5772#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
5773#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
5774#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
5775#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
5776 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5777 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5778#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
5779 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5780 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5781#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
5782 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5783 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5784 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5785#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
5786 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
5787 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
5788 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
5789#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
5790#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
5791#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
5792#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5793 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5794 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5795#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5796 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5797 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5798#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
5799 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5800 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5801#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5802 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
5803 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5804#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5805 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
5806 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5807#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
5808 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
5809 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
5810#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
5811#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
5812#define IEM_MC_ELSE() } else {
5813#define IEM_MC_ENDIF() } do {} while (0)
5814
5815/** @} */
5816
5817
5818/** @name Opcode Debug Helpers.
5819 * @{
5820 */
5821#ifdef DEBUG
5822# define IEMOP_MNEMONIC(a_szMnemonic) \
5823 Log2(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5824 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
5825# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
5826 Log2(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
5827 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
5828#else
5829# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
5830# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
5831#endif
5832
5833/** @} */
5834
5835
5836/** @name Opcode Helpers.
5837 * @{
5838 */
5839
5840/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
5841 * lock prefixed. */
5842#define IEMOP_HLP_NO_LOCK_PREFIX() \
5843 do \
5844 { \
5845 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
5846 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
5847 } while (0)
5848
5849/** The instruction is not available in 64-bit mode, throw \#UD if we're in
5850 * 64-bit mode. */
5851#define IEMOP_HLP_NO_64BIT() \
5852 do \
5853 { \
5854 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5855 return IEMOP_RAISE_INVALID_OPCODE(); \
5856 } while (0)
5857
5858/** The instruction defaults to 64-bit operand size if 64-bit mode. */
5859#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
5860 do \
5861 { \
5862 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
5863 iemRecalEffOpSize64Default(pIemCpu); \
5864 } while (0)
5865
5866
5867
5868/**
5869 * Calculates the effective address of a ModR/M memory operand.
5870 *
5871 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
5872 *
5873 * @return Strict VBox status code.
5874 * @param pIemCpu The IEM per CPU data.
5875 * @param bRm The ModRM byte.
5876 * @param pGCPtrEff Where to return the effective address.
5877 */
5878static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
5879{
5880 LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
5881 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5882#define SET_SS_DEF() \
5883 do \
5884 { \
5885 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
5886 pIemCpu->iEffSeg = X86_SREG_SS; \
5887 } while (0)
5888
5889/** @todo Check the effective address size crap! */
5890 switch (pIemCpu->enmEffAddrMode)
5891 {
5892 case IEMMODE_16BIT:
5893 {
5894 uint16_t u16EffAddr;
5895
5896 /* Handle the disp16 form with no registers first. */
5897 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
5898 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
5899 else
5900 {
5901                /* Get the displacement. */
5902 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5903 {
5904 case 0: u16EffAddr = 0; break;
5905 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
5906 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
5907 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
5908 }
5909
5910 /* Add the base and index registers to the disp. */
5911 switch (bRm & X86_MODRM_RM_MASK)
5912 {
5913 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
5914 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
5915 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
5916 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
5917 case 4: u16EffAddr += pCtx->si; break;
5918 case 5: u16EffAddr += pCtx->di; break;
5919 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
5920 case 7: u16EffAddr += pCtx->bx; break;
5921 }
5922 }
5923
5924 *pGCPtrEff = u16EffAddr;
5925 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
5926 return VINF_SUCCESS;
5927 }
5928
5929 case IEMMODE_32BIT:
5930 {
5931 uint32_t u32EffAddr;
5932
5933 /* Handle the disp32 form with no registers first. */
5934 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
5935 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
5936 else
5937 {
5938 /* Get the register (or SIB) value. */
5939 switch ((bRm & X86_MODRM_RM_MASK))
5940 {
5941 case 0: u32EffAddr = pCtx->eax; break;
5942 case 1: u32EffAddr = pCtx->ecx; break;
5943 case 2: u32EffAddr = pCtx->edx; break;
5944 case 3: u32EffAddr = pCtx->ebx; break;
5945 case 4: /* SIB */
5946 {
5947 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
5948
5949 /* Get the index and scale it. */
5950 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
5951 {
5952 case 0: u32EffAddr = pCtx->eax; break;
5953 case 1: u32EffAddr = pCtx->ecx; break;
5954 case 2: u32EffAddr = pCtx->edx; break;
5955 case 3: u32EffAddr = pCtx->ebx; break;
5956 case 4: u32EffAddr = 0; /*none */ break;
5957 case 5: u32EffAddr = pCtx->ebp; break;
5958 case 6: u32EffAddr = pCtx->esi; break;
5959 case 7: u32EffAddr = pCtx->edi; break;
5960 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5961 }
5962 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
5963
5964 /* add base */
5965 switch (bSib & X86_SIB_BASE_MASK)
5966 {
5967 case 0: u32EffAddr += pCtx->eax; break;
5968 case 1: u32EffAddr += pCtx->ecx; break;
5969 case 2: u32EffAddr += pCtx->edx; break;
5970 case 3: u32EffAddr += pCtx->ebx; break;
5971 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
5972 case 5:
5973 if ((bRm & X86_MODRM_MOD_MASK) != 0)
5974 {
5975 u32EffAddr += pCtx->ebp;
5976 SET_SS_DEF();
5977 }
5978 else
5979 {
5980 uint32_t u32Disp;
5981 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
5982 u32EffAddr += u32Disp;
5983 }
5984 break;
5985 case 6: u32EffAddr += pCtx->esi; break;
5986 case 7: u32EffAddr += pCtx->edi; break;
5987 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5988 }
5989 break;
5990 }
5991 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
5992 case 6: u32EffAddr = pCtx->esi; break;
5993 case 7: u32EffAddr = pCtx->edi; break;
5994 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5995 }
5996
5997 /* Get and add the displacement. */
5998 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
5999 {
6000 case 0:
6001 break;
6002 case 1:
6003 {
6004 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6005 u32EffAddr += i8Disp;
6006 break;
6007 }
6008 case 2:
6009 {
6010 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6011 u32EffAddr += u32Disp;
6012 break;
6013 }
6014 default:
6015 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
6016 }
6017
6018 }
6019 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
6020 *pGCPtrEff = u32EffAddr;
6021 else
6022 {
6023 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
6024 *pGCPtrEff = u32EffAddr & UINT16_MAX;
6025 }
6026 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6027 return VINF_SUCCESS;
6028 }
6029
6030 case IEMMODE_64BIT:
6031 {
6032 uint64_t u64EffAddr;
6033
6034 /* Handle the rip+disp32 form with no registers first. */
6035 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
6036 {
6037 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
6038 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
6039 }
6040 else
6041 {
6042 /* Get the register (or SIB) value. */
6043 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
6044 {
6045 case 0: u64EffAddr = pCtx->rax; break;
6046 case 1: u64EffAddr = pCtx->rcx; break;
6047 case 2: u64EffAddr = pCtx->rdx; break;
6048 case 3: u64EffAddr = pCtx->rbx; break;
6049 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
6050 case 6: u64EffAddr = pCtx->rsi; break;
6051 case 7: u64EffAddr = pCtx->rdi; break;
6052 case 8: u64EffAddr = pCtx->r8; break;
6053 case 9: u64EffAddr = pCtx->r9; break;
6054 case 10: u64EffAddr = pCtx->r10; break;
6055 case 11: u64EffAddr = pCtx->r11; break;
6056 case 13: u64EffAddr = pCtx->r13; break;
6057 case 14: u64EffAddr = pCtx->r14; break;
6058 case 15: u64EffAddr = pCtx->r15; break;
6059 /* SIB */
6060 case 4:
6061 case 12:
6062 {
6063 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
6064
6065 /* Get the index and scale it. */
6066                        switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
6067 {
6068 case 0: u64EffAddr = pCtx->rax; break;
6069 case 1: u64EffAddr = pCtx->rcx; break;
6070 case 2: u64EffAddr = pCtx->rdx; break;
6071 case 3: u64EffAddr = pCtx->rbx; break;
6072 case 4: u64EffAddr = 0; /*none */ break;
6073 case 5: u64EffAddr = pCtx->rbp; break;
6074 case 6: u64EffAddr = pCtx->rsi; break;
6075 case 7: u64EffAddr = pCtx->rdi; break;
6076 case 8: u64EffAddr = pCtx->r8; break;
6077 case 9: u64EffAddr = pCtx->r9; break;
6078 case 10: u64EffAddr = pCtx->r10; break;
6079 case 11: u64EffAddr = pCtx->r11; break;
6080 case 12: u64EffAddr = pCtx->r12; break;
6081 case 13: u64EffAddr = pCtx->r13; break;
6082 case 14: u64EffAddr = pCtx->r14; break;
6083 case 15: u64EffAddr = pCtx->r15; break;
6084 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6085 }
6086 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
6087
6088 /* add base */
6089 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
6090 {
6091 case 0: u64EffAddr += pCtx->rax; break;
6092 case 1: u64EffAddr += pCtx->rcx; break;
6093 case 2: u64EffAddr += pCtx->rdx; break;
6094 case 3: u64EffAddr += pCtx->rbx; break;
6095 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
6096 case 6: u64EffAddr += pCtx->rsi; break;
6097 case 7: u64EffAddr += pCtx->rdi; break;
6098 case 8: u64EffAddr += pCtx->r8; break;
6099 case 9: u64EffAddr += pCtx->r9; break;
6100 case 10: u64EffAddr += pCtx->r10; break;
6101 case 11: u64EffAddr += pCtx->r11; break;
6102 case 14: u64EffAddr += pCtx->r14; break;
6103 case 15: u64EffAddr += pCtx->r15; break;
6104 /* complicated encodings */
6105 case 5:
6106 case 13:
6107 if ((bRm & X86_MODRM_MOD_MASK) != 0)
6108 {
6109 if (!pIemCpu->uRexB)
6110 {
6111 u64EffAddr += pCtx->rbp;
6112 SET_SS_DEF();
6113 }
6114 else
6115 u64EffAddr += pCtx->r13;
6116 }
6117 else
6118 {
6119 uint32_t u32Disp;
6120 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6121 u64EffAddr += (int32_t)u32Disp;
6122 }
6123 break;
6124 }
6125 break;
6126 }
6127 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6128 }
6129
6130 /* Get and add the displacement. */
6131 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
6132 {
6133 case 0:
6134 break;
6135 case 1:
6136 {
6137 int8_t i8Disp;
6138 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
6139 u64EffAddr += i8Disp;
6140 break;
6141 }
6142 case 2:
6143 {
6144 uint32_t u32Disp;
6145 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
6146 u64EffAddr += (int32_t)u32Disp;
6147 break;
6148 }
6149 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
6150 }
6151
6152 }
6153 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
6154 *pGCPtrEff = u64EffAddr;
6155 else
6156                *pGCPtrEff = u64EffAddr & UINT32_MAX;
6157 LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
6158 return VINF_SUCCESS;
6159 }
6160 }
6161
6162 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
6163}
6164
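/*
 * Worked example for iemOpHlpCalcRmEffAddr in 16-bit addressing mode (values
 * chosen purely for illustration): bRm=0x42 decodes as mod=01, rm=010, i.e.
 * [bp+si+disp8].  With bp=0x1000, si=0x0020 and disp8=0x04 the effective
 * address is 0x1024, and SET_SS_DEF() makes SS the default segment.
 */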
6165/** @} */
6166
6167
6168
6169/*
6170 * Include the instructions
6171 */
6172#include "IEMAllInstructions.cpp.h"
6173
6174
6175
6176
6177#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6178
6179/**
6180 * Sets up execution verification mode.
6181 */
6182static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
6183{
6184 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6185 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
6186
6187 /*
6188 * Enable verification and/or logging.
6189 */
6190 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
6191 if ( pIemCpu->fNoRem
6192#if 0 /* auto enable on first paged protected mode interrupt */
6193 && pOrgCtx->eflags.Bits.u1IF
6194 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
6195 && TRPMHasTrap(pVCpu)
6196 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6197#endif
6198#if 0
6199 && pOrgCtx->cs == 0x10
6200 && ( pOrgCtx->rip == 0x90119e3e
6201 || pOrgCtx->rip == 0x901d9810
6202 )
6203#endif
6204#if 1 /* Auto enable DSL - FPU stuff. */
6205 && pOrgCtx->cs == 0x10
6206 && ( pOrgCtx->rip == 0xc02ec07f
6207 || pOrgCtx->rip == 0xc02ec082
6208 || pOrgCtx->rip == 0xc02ec0c9
6209 )
6210#endif
6211#if 0
6212 && pOrgCtx->rip == 0x9022bb3a
6213#endif
6214#if 0
6215 && 0
6216#endif
6217 )
6218 {
6219 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
6220 RTLogFlags(NULL, "enabled");
6221 pIemCpu->fNoRem = false;
6222 }
6223
6224 /*
6225 * Switch state.
6226 */
6227 if (IEM_VERIFICATION_ENABLED(pIemCpu))
6228 {
6229 static CPUMCTX s_DebugCtx; /* Ugly! */
6230
6231 s_DebugCtx = *pOrgCtx;
6232 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
6233 }
6234
6235 /*
6236 * See if there is an interrupt pending in TRPM and inject it if we can.
6237 */
6238 if ( pOrgCtx->eflags.Bits.u1IF
6239 && TRPMHasTrap(pVCpu)
6240 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
6241 {
6242 uint8_t u8TrapNo;
6243 TRPMEVENT enmType;
6244 RTGCUINT uErrCode;
6245 RTGCPTR uCr2;
6246 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
6247 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
6248 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6249 TRPMResetTrap(pVCpu);
6250 }
6251
6252 /*
6253 * Reset the counters.
6254 */
6255 pIemCpu->cIOReads = 0;
6256 pIemCpu->cIOWrites = 0;
6257 pIemCpu->fUndefinedEFlags = 0;
6258
6259 if (IEM_VERIFICATION_ENABLED(pIemCpu))
6260 {
6261 /*
6262 * Free all verification records.
6263 */
6264 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
6265 pIemCpu->pIemEvtRecHead = NULL;
6266 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
6267 do
6268 {
6269 while (pEvtRec)
6270 {
6271 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
6272 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
6273 pIemCpu->pFreeEvtRec = pEvtRec;
6274 pEvtRec = pNext;
6275 }
6276 pEvtRec = pIemCpu->pOtherEvtRecHead;
6277 pIemCpu->pOtherEvtRecHead = NULL;
6278 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
6279 } while (pEvtRec);
6280 }
6281}
6282
6283
6284/**
6285 * Allocate an event record.
6286 * @returns Pointer to a record, or NULL if none could be provided.
6287 */
6288static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
6289{
6290 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6291 return NULL;
6292
6293 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
6294 if (pEvtRec)
6295 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
6296 else
6297 {
6298 if (!pIemCpu->ppIemEvtRecNext)
6299 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
6300
6301 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
6302 if (!pEvtRec)
6303 return NULL;
6304 }
6305 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
6306 pEvtRec->pNext = NULL;
6307 return pEvtRec;
6308}
6309
6310
6311/**
6312 * IOMMMIORead notification.
6313 */
6314VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
6315{
6316 PVMCPU pVCpu = VMMGetCpu(pVM);
6317 if (!pVCpu)
6318 return;
6319 PIEMCPU pIemCpu = &pVCpu->iem.s;
6320 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6321 if (!pEvtRec)
6322 return;
6323 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6324 pEvtRec->u.RamRead.GCPhys = GCPhys;
6325 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
6326 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6327 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6328}
6329
6330
6331/**
6332 * IOMMMIOWrite notification.
6333 */
6334VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
6335{
6336 PVMCPU pVCpu = VMMGetCpu(pVM);
6337 if (!pVCpu)
6338 return;
6339 PIEMCPU pIemCpu = &pVCpu->iem.s;
6340 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6341 if (!pEvtRec)
6342 return;
6343 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6344 pEvtRec->u.RamWrite.GCPhys = GCPhys;
6345 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
6346 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
6347 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
6348 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
6349 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
6350 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6351 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6352}
6353
6354
6355/**
6356 * IOMIOPortRead notification.
6357 */
6358VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
6359{
6360 PVMCPU pVCpu = VMMGetCpu(pVM);
6361 if (!pVCpu)
6362 return;
6363 PIEMCPU pIemCpu = &pVCpu->iem.s;
6364 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6365 if (!pEvtRec)
6366 return;
6367 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6368 pEvtRec->u.IOPortRead.Port = Port;
6369 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
6370 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6371 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6372}
6373
6374/**
6375 * IOMIOPortWrite notification.
6376 */
6377VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6378{
6379 PVMCPU pVCpu = VMMGetCpu(pVM);
6380 if (!pVCpu)
6381 return;
6382 PIEMCPU pIemCpu = &pVCpu->iem.s;
6383 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6384 if (!pEvtRec)
6385 return;
6386 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6387 pEvtRec->u.IOPortWrite.Port = Port;
6388 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
6389 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6390 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
6391 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
6392}
6393
6394
6395VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
6396{
6397 AssertFailed();
6398}
6399
6400
6401VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
6402{
6403 AssertFailed();
6404}
6405
6406
6407/**
6408 * Fakes and records an I/O port read.
6409 *
6410 * @returns VINF_SUCCESS.
6411 * @param pIemCpu The IEM per CPU data.
6412 * @param Port The I/O port.
6413 * @param pu32Value Where to store the fake value.
6414 * @param cbValue The size of the access.
6415 */
6416static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6417{
6418 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6419 if (pEvtRec)
6420 {
6421 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
6422 pEvtRec->u.IOPortRead.Port = Port;
6423 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
6424 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6425 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6426 }
6427 pIemCpu->cIOReads++;
6428 *pu32Value = 0xcccccccc;
6429 return VINF_SUCCESS;
6430}
6431
6432
6433/**
6434 * Fakes and records an I/O port write.
6435 *
6436 * @returns VINF_SUCCESS.
6437 * @param pIemCpu The IEM per CPU data.
6438 * @param Port The I/O port.
6439 * @param u32Value The value being written.
6440 * @param cbValue The size of the access.
6441 */
6442static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6443{
6444 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6445 if (pEvtRec)
6446 {
6447 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
6448 pEvtRec->u.IOPortWrite.Port = Port;
6449 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
6450 pEvtRec->u.IOPortWrite.u32Value = u32Value;
6451 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6452 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6453 }
6454 pIemCpu->cIOWrites++;
6455 return VINF_SUCCESS;
6456}
6457
6458
6459/**
6460 * Used to add extra details about a stub case.
6461 * @param pIemCpu The IEM per CPU state.
6462 */
6463static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
6464{
6465 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6466 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6467 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
6468 char szRegs[4096];
6469 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6470 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6471 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6472 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6473 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6474 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6475 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6476 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6477 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6478 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6479 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6480 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6481 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6482 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6483 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6484 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6485 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6486 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6487 " efer=%016VR{efer}\n"
6488 " pat=%016VR{pat}\n"
6489 " sf_mask=%016VR{sf_mask}\n"
6490 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6491 " lstar=%016VR{lstar}\n"
6492 " star=%016VR{star} cstar=%016VR{cstar}\n"
6493 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6494 );
6495
6496 char szInstr1[256];
6497 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip - pIemCpu->offOpcode,
6498 DBGF_DISAS_FLAGS_DEFAULT_MODE,
6499 szInstr1, sizeof(szInstr1), NULL);
6500 char szInstr2[256];
6501 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
6502 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6503 szInstr2, sizeof(szInstr2), NULL);
6504
6505 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
6506}
6507
6508
6509/**
6510 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
6511 * dump to the assertion info.
6512 *
6513 * @param pEvtRec The record to dump.
6514 */
6515static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
6516{
6517 switch (pEvtRec->enmEvent)
6518 {
6519 case IEMVERIFYEVENT_IOPORT_READ:
6520 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
6521                            pEvtRec->u.IOPortRead.Port,
6522                            pEvtRec->u.IOPortRead.cbValue);
6523 break;
6524 case IEMVERIFYEVENT_IOPORT_WRITE:
6525 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
6526 pEvtRec->u.IOPortWrite.Port,
6527 pEvtRec->u.IOPortWrite.cbValue,
6528 pEvtRec->u.IOPortWrite.u32Value);
6529 break;
6530 case IEMVERIFYEVENT_RAM_READ:
6531 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
6532 pEvtRec->u.RamRead.GCPhys,
6533 pEvtRec->u.RamRead.cb);
6534 break;
6535 case IEMVERIFYEVENT_RAM_WRITE:
6536 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*RHxs\n",
6537 pEvtRec->u.RamWrite.GCPhys,
6538 pEvtRec->u.RamWrite.cb,
6539 (int)pEvtRec->u.RamWrite.cb,
6540 pEvtRec->u.RamWrite.ab);
6541 break;
6542 default:
6543 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
6544 break;
6545 }
6546}
6547
6548
6549/**
6550 * Raises an assertion on the specified records, showing the given message with
6551 * dumps of both records attached.
6552 *
6553 * @param pIemCpu The IEM per CPU data.
6554 * @param pEvtRec1 The first record.
6555 * @param pEvtRec2 The second record.
6556 * @param pszMsg The message explaining why we're asserting.
6557 */
6558static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
6559{
6560 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6561 iemVerifyAssertAddRecordDump(pEvtRec1);
6562 iemVerifyAssertAddRecordDump(pEvtRec2);
6563 iemVerifyAssertMsg2(pIemCpu);
6564 RTAssertPanic();
6565}
6566
6567
6568/**
6569 * Raises an assertion on the specified record, showing the given message with
6570 * a record dump attached.
6571 *
6572 * @param pIemCpu The IEM per CPU data.
6573 * @param   pEvtRec     The record.
6574 * @param pszMsg The message explaining why we're asserting.
6575 */
6576static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
6577{
6578 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6579 iemVerifyAssertAddRecordDump(pEvtRec);
6580 iemVerifyAssertMsg2(pIemCpu);
6581 RTAssertPanic();
6582}
6583
6584
6585/**
6586 * Verifies a write record.
6587 *
6588 * @param pIemCpu The IEM per CPU data.
6589 * @param pEvtRec The write record.
6590 */
6591static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
6592{
6593 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
6594 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
6595 if ( RT_FAILURE(rc)
6596 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
6597 {
6598 /* fend off ins */
6599 if ( !pIemCpu->cIOReads
6600 || pEvtRec->u.RamWrite.ab[0] != 0xcc
6601 || ( pEvtRec->u.RamWrite.cb != 1
6602 && pEvtRec->u.RamWrite.cb != 2
6603 && pEvtRec->u.RamWrite.cb != 4) )
6604 {
6605 /* fend off ROMs */
6606 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
6607 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
6608 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
6609 {
6610 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
6611                RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
6612 RTAssertMsg2Add("REM: %.*Rhxs\n"
6613 "IEM: %.*Rhxs\n",
6614 pEvtRec->u.RamWrite.cb, abBuf,
6615 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
6616 iemVerifyAssertAddRecordDump(pEvtRec);
6617 iemVerifyAssertMsg2(pIemCpu);
6618 RTAssertPanic();
6619 }
6620 }
6621 }
6622
6623}
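
/*
 * Note on the ROM "fend off" checks above: (GCPhys - GCPhysBase) > cbRange
 * relies on unsigned wrap-around to express "GCPhys lies outside
 * [GCPhysBase, GCPhysBase + cbRange]" in a single comparison.  A spelled-out
 * equivalent (illustrative helper only, not part of the build):
 */
#if 0
static bool iemVerifyExampleIsOutsideRange(RTGCPHYS GCPhys, RTGCPHYS GCPhysBase, RTGCPHYS cbRange)
{
    /* Same result as: GCPhys - GCPhysBase > cbRange, provided GCPhysBase + cbRange doesn't overflow. */
    return GCPhys < GCPhysBase || GCPhys > GCPhysBase + cbRange;
}
#endif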
6624
6625/**
6626 * Performs the post-execution verification checks.
6627 */
6628static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
6629{
6630 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
6631 return;
6632
6633 /*
6634 * Switch back the state.
6635 */
6636 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
6637 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
6638 Assert(pOrgCtx != pDebugCtx);
6639 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6640
6641 /*
6642 * Execute the instruction in REM.
6643 */
6644 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6645 EMRemLock(pVM);
6646 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
6647 AssertRC(rc);
6648 EMRemUnlock(pVM);
6649
6650 /*
6651 * Compare the register states.
6652 */
6653 unsigned cDiffs = 0;
6654 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
6655 {
6656        Log(("REM and IEM end up with different registers!\n"));
6657
6658# define CHECK_FIELD(a_Field) \
6659 do \
6660 { \
6661 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6662 { \
6663 switch (sizeof(pOrgCtx->a_Field)) \
6664 { \
6665 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6666 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6667 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6668 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
6669 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
6670 } \
6671 cDiffs++; \
6672 } \
6673 } while (0)
6674
6675# define CHECK_BIT_FIELD(a_Field) \
6676 do \
6677 { \
6678 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
6679 { \
6680 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
6681 cDiffs++; \
6682 } \
6683 } while (0)
6684
6685# define CHECK_SEL(a_Sel) \
6686 do \
6687 { \
6688 CHECK_FIELD(a_Sel); \
6689 if ( pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
6690 && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
6691 { \
6692 RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
6693 cDiffs++; \
6694 } \
6695 CHECK_FIELD(a_Sel##Hid.u64Base); \
6696 CHECK_FIELD(a_Sel##Hid.u32Limit); \
6697 } while (0)
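
        /* Note: in the CHECK_* output, "iem" is the pDebugCtx value (what IEM
           computed) and "rem" is the pOrgCtx value (what REM computed).
           CHECK_SEL additionally tolerates IEM having set the accessed bit in
           the hidden attributes when REM has left it clear. */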
6698
6699 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
6700 {
6701 RTAssertMsg2Weak(" the FPU state differs\n");
6702 cDiffs++;
6703 CHECK_FIELD(fpu.FCW);
6704 CHECK_FIELD(fpu.FSW);
6705 CHECK_FIELD(fpu.FTW);
6706 CHECK_FIELD(fpu.FOP);
6707 CHECK_FIELD(fpu.FPUIP);
6708 CHECK_FIELD(fpu.CS);
6709 CHECK_FIELD(fpu.Rsrvd1);
6710 CHECK_FIELD(fpu.FPUDP);
6711 CHECK_FIELD(fpu.DS);
6712 CHECK_FIELD(fpu.Rsrvd2);
6713 CHECK_FIELD(fpu.MXCSR);
6714 CHECK_FIELD(fpu.MXCSR_MASK);
6715 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
6716 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
6717 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
6718 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
6719 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
6720 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
6721 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
6722 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
6723 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
6724 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
6725 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
6726 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
6727 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
6728 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
6729 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
6730 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
6731 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
6732 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
6733 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
6734 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
6735 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
6736 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
6737 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
6738 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
6739 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
6740 CHECK_FIELD(fpu.au32RsrvdRest[i]);
6741 }
6742 CHECK_FIELD(rip);
6743 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
6744 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
6745 {
6746 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
6747 CHECK_BIT_FIELD(rflags.Bits.u1CF);
6748 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
6749 CHECK_BIT_FIELD(rflags.Bits.u1PF);
6750 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
6751 CHECK_BIT_FIELD(rflags.Bits.u1AF);
6752 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
6753 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
6754 CHECK_BIT_FIELD(rflags.Bits.u1SF);
6755 CHECK_BIT_FIELD(rflags.Bits.u1TF);
6756 CHECK_BIT_FIELD(rflags.Bits.u1IF);
6757 CHECK_BIT_FIELD(rflags.Bits.u1DF);
6758 CHECK_BIT_FIELD(rflags.Bits.u1OF);
6759 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
6760 CHECK_BIT_FIELD(rflags.Bits.u1NT);
6761 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
6762 CHECK_BIT_FIELD(rflags.Bits.u1RF);
6763 CHECK_BIT_FIELD(rflags.Bits.u1VM);
6764 CHECK_BIT_FIELD(rflags.Bits.u1AC);
6765 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
6766 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
6767 CHECK_BIT_FIELD(rflags.Bits.u1ID);
6768 }
6769
6770 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
6771 CHECK_FIELD(rax);
6772 CHECK_FIELD(rcx);
6773 if (!pIemCpu->fIgnoreRaxRdx)
6774 CHECK_FIELD(rdx);
6775 CHECK_FIELD(rbx);
6776 CHECK_FIELD(rsp);
6777 CHECK_FIELD(rbp);
6778 CHECK_FIELD(rsi);
6779 CHECK_FIELD(rdi);
6780 CHECK_FIELD(r8);
6781 CHECK_FIELD(r9);
6782 CHECK_FIELD(r10);
6783 CHECK_FIELD(r11);
6784 CHECK_FIELD(r12);
6785        CHECK_FIELD(r13);
        CHECK_FIELD(r14);
        CHECK_FIELD(r15);
6786 CHECK_SEL(cs);
6787 CHECK_SEL(ss);
6788 CHECK_SEL(ds);
6789 CHECK_SEL(es);
6790 CHECK_SEL(fs);
6791 CHECK_SEL(gs);
6792 CHECK_FIELD(cr0);
6793 CHECK_FIELD(cr2);
6794 CHECK_FIELD(cr3);
6795 CHECK_FIELD(cr4);
6796 CHECK_FIELD(dr[0]);
6797 CHECK_FIELD(dr[1]);
6798 CHECK_FIELD(dr[2]);
6799 CHECK_FIELD(dr[3]);
6800 CHECK_FIELD(dr[6]);
6801 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
6802 CHECK_FIELD(dr[7]);
6803 CHECK_FIELD(gdtr.cbGdt);
6804 CHECK_FIELD(gdtr.pGdt);
6805 CHECK_FIELD(idtr.cbIdt);
6806 CHECK_FIELD(idtr.pIdt);
6807 CHECK_FIELD(ldtr);
6808 CHECK_FIELD(ldtrHid.u64Base);
6809 CHECK_FIELD(ldtrHid.u32Limit);
6810 CHECK_FIELD(ldtrHid.Attr.u);
6811 CHECK_FIELD(tr);
6812 CHECK_FIELD(trHid.u64Base);
6813 CHECK_FIELD(trHid.u32Limit);
6814 CHECK_FIELD(trHid.Attr.u);
6815 CHECK_FIELD(SysEnter.cs);
6816 CHECK_FIELD(SysEnter.eip);
6817 CHECK_FIELD(SysEnter.esp);
6818 CHECK_FIELD(msrEFER);
6819 CHECK_FIELD(msrSTAR);
6820 CHECK_FIELD(msrPAT);
6821 CHECK_FIELD(msrLSTAR);
6822 CHECK_FIELD(msrCSTAR);
6823 CHECK_FIELD(msrSFMASK);
6824 CHECK_FIELD(msrKERNELGSBASE);
6825
6826 if (cDiffs != 0)
6827 {
6828 if (LogIs3Enabled())
6829 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
6830 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
6831 iemVerifyAssertMsg2(pIemCpu);
6832 RTAssertPanic();
6833 }
6834# undef CHECK_FIELD
6835# undef CHECK_BIT_FIELD
6836 }
6837
6838 /*
6839 * If the register state compared fine, check the verification event
6840 * records.
6841 */
6842 if (cDiffs == 0)
6843 {
6844 /*
6845         * Compare verification event records.
6846 * - I/O port accesses should be a 1:1 match.
6847 */
6848 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
6849 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
6850 while (pIemRec && pOtherRec)
6851 {
6852            /* Since REM may not report all RAM reads and writes, skip extra IEM RAM
6853               records here, verifying any write records against actual memory as we go. */
6854 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
6855 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
6856 && pIemRec->pNext)
6857 {
6858 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6859 iemVerifyWriteRecord(pIemCpu, pIemRec);
6860 pIemRec = pIemRec->pNext;
6861 }
6862
6863 /* Do the compare. */
6864 if (pIemRec->enmEvent != pOtherRec->enmEvent)
6865 {
6866 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
6867 break;
6868 }
6869 bool fEquals;
6870 switch (pIemRec->enmEvent)
6871 {
6872 case IEMVERIFYEVENT_IOPORT_READ:
6873 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
6874 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
6875 break;
6876 case IEMVERIFYEVENT_IOPORT_WRITE:
6877 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
6878 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
6879 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
6880 break;
6881 case IEMVERIFYEVENT_RAM_READ:
6882 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
6883 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
6884 break;
6885 case IEMVERIFYEVENT_RAM_WRITE:
6886 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
6887 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
6888 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
6889 break;
6890 default:
6891 fEquals = false;
6892 break;
6893 }
6894 if (!fEquals)
6895 {
6896 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
6897 break;
6898 }
6899
6900 /* advance */
6901 pIemRec = pIemRec->pNext;
6902 pOtherRec = pOtherRec->pNext;
6903 }
6904
6905 /* Ignore extra writes and reads. */
6906 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
6907 {
6908 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
6909 iemVerifyWriteRecord(pIemCpu, pIemRec);
6910 pIemRec = pIemRec->pNext;
6911 }
6912 if (pIemRec != NULL)
6913 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
6914 else if (pOtherRec != NULL)
6915            iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
6916 }
6917 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
6918
6919#if 0
6920 /*
6921 * HACK ALERT! You don't normally want to verify a whole boot sequence.
6922 */
6923 if (pIemCpu->cInstructions == 1)
6924 RTLogFlags(NULL, "disabled");
6925#endif
6926}
6927
6928#else /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6929
6930/* stubs */
6931static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
6932{
6933 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
6934 return VERR_INTERNAL_ERROR;
6935}
6936
6937static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
6938{
6939 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
6940 return VERR_INTERNAL_ERROR;
6941}
6942
6943#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */
6944
6945
6946/**
6947 * Execute one instruction.
6948 *
6949 * @returns Strict VBox status code.
6950 * @param pVCpu The current virtual CPU.
6951 */
6952VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
6953{
6954 PIEMCPU pIemCpu = &pVCpu->iem.s;
6955
6956#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6957 iemExecVerificationModeSetup(pIemCpu);
6958#endif
6959#ifdef LOG_ENABLED
6960 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6961 if (LogIs2Enabled())
6962 {
6963 char szInstr[256];
6964 uint32_t cbInstr = 0;
6965 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
6966 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6967 szInstr, sizeof(szInstr), &cbInstr);
6968
6969 Log2(("**** "
6970 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
6971 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
6972 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
6973 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
6974 " %s\n"
6975 ,
6976 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
6977 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
6978 (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
6979 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
6980 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
6981 szInstr));
6982
6983 if (LogIs3Enabled())
6984 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
6985 }
6986#endif
6987
6988 /*
6989 * Do the decoding and emulation.
6990 */
6991 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
6992 if (rcStrict != VINF_SUCCESS)
6993 {
6994#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
6995 iemExecVerificationModeCheck(pIemCpu);
6996#endif
6997 return rcStrict;
6998 }
6999
7000 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7001 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
7002 if (rcStrict == VINF_SUCCESS)
7003 pIemCpu->cInstructions++;
7004//#ifdef DEBUG
7005// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
7006//#endif
7007
7008 /* Execute the next instruction as well if a cli, pop ss or
7009 mov ss, Gr has just completed successfully. */
7010 if ( rcStrict == VINF_SUCCESS
7011 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
7012 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
7013 {
7014 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
7015 if (rcStrict == VINF_SUCCESS)
7016 {
7017                IEM_OPCODE_GET_NEXT_U8(&b);
7018 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
7019 if (rcStrict == VINF_SUCCESS)
7020 pIemCpu->cInstructions++;
7021 }
7022 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
7023 }
7024
7025#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
7026 /*
7027 * Assert some sanity.
7028 */
7029 iemExecVerificationModeCheck(pIemCpu);
7030#endif
7031 return rcStrict;
7032}
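
/*
 * Hypothetical caller sketch (illustration only, kept out of the build): an
 * EM-style loop would call IEMExecOne on the current VCPU and fall back to
 * another execution engine when IEM reports the instruction, or some aspect
 * of it, as not implemented.  The status codes used below are assumed from
 * the VBox/err.h of this era.
 */
#if 0
static int emExampleTryIem(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (   rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
        return VINF_EM_RESCHEDULE;          /* hand the instruction to REM or the hardware loop */
    return VBOXSTRICTRC_TODO(rcStrict);     /* otherwise just propagate the strict status */
}
#endif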
7033
7034
7035/**
7036 * Injects a trap, fault, abort, software interrupt or external interrupt.
7037 *
7038 * The parameter list matches TRPMQueryTrapAll pretty closely.
7039 *
7040 * @returns Strict VBox status code.
7041 * @param pVCpu The current virtual CPU.
7042 * @param u8TrapNo The trap number.
7043 * @param enmType What type is it (trap/fault/abort), software
7044 * interrupt or hardware interrupt.
7045 * @param uErrCode The error code if applicable.
7046 * @param uCr2 The CR2 value if applicable.
7047 */
7048VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
7049{
7050 iemInitDecoder(&pVCpu->iem.s);
7051
7052 uint32_t fFlags;
7053 switch (enmType)
7054 {
7055 case TRPM_HARDWARE_INT:
7056 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
7057 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
7058 uErrCode = uCr2 = 0;
7059 break;
7060
7061 case TRPM_SOFTWARE_INT:
7062 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
7063 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
7064 uErrCode = uCr2 = 0;
7065 break;
7066
7067 case TRPM_TRAP:
7068 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
7069 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
7070 if (u8TrapNo == X86_XCPT_PF)
7071 fFlags |= IEM_XCPT_FLAGS_CR2;
7072 switch (u8TrapNo)
7073 {
7074 case X86_XCPT_DF:
7075 case X86_XCPT_TS:
7076 case X86_XCPT_NP:
7077 case X86_XCPT_SS:
7078 case X86_XCPT_PF:
7079 case X86_XCPT_AC:
7080 fFlags |= IEM_XCPT_FLAGS_ERR;
7081 break;
7082 }
7083 break;
7084
7085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7086 }
7087
7088 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
7089}
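
/*
 * Hypothetical usage sketch (illustration only): injecting a guest page fault
 * the way a TRPM-event forwarder might.  GCPtrFault and uPfErrorCode are
 * made-up names for the faulting address and the page-fault error code.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          (uint16_t)uPfErrorCode, GCPtrFault);
#endif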
7090