VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@47683

Last change on this file since 47683 was 47671, checked in by vboxsync, 12 years ago

VMM: More debugging related stuff.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 369.2 KB
1/* $Id: IEMAll.cpp 47671 2013-08-12 11:16:55Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
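/*
 * Note (not part of the original source): the log statements in this file are
 * gated by the VBox logger's group/level machinery, so a debug build can
 * usually be made to emit the decoding details by enabling the IEM group at
 * the desired level via the logger environment variable, roughly along the
 * lines of VBOX_LOG="+iem.e.l4" (see the VBox logging documentation for the
 * exact flag syntax).
 */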
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pdm.h>
86#include <VBox/vmm/pgm.h>
87#include <internal/pgm.h>
88#include <VBox/vmm/iom.h>
89#include <VBox/vmm/em.h>
90#include <VBox/vmm/hm.h>
91#include <VBox/vmm/tm.h>
92#include <VBox/vmm/dbgf.h>
93#include <VBox/vmm/dbgftrace.h>
94#ifdef VBOX_WITH_RAW_MODE_NOT_R0
95# include <VBox/vmm/patm.h>
96#endif
97#include "IEMInternal.h"
98#ifdef IEM_VERIFICATION_MODE_FULL
99# include <VBox/vmm/rem.h>
100# include <VBox/vmm/mm.h>
101#endif
102#include <VBox/vmm/vm.h>
103#include <VBox/log.h>
104#include <VBox/err.h>
105#include <VBox/param.h>
106#include <VBox/dis.h>
107#include <VBox/disopcode.h>
108#include <iprt/assert.h>
109#include <iprt/string.h>
110#include <iprt/x86.h>
111
112
113
114/*******************************************************************************
115* Structures and Typedefs *
116*******************************************************************************/
117/** @typedef PFNIEMOP
118 * Pointer to an opcode decoder function.
119 */
120
121/** @def FNIEMOP_DEF
122 * Define an opcode decoder function.
123 *
124 * We're using macros for this so that adding and removing parameters as well as
125 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
126 *
127 * @param a_Name The function name.
128 */
129
130
131#if defined(__GNUC__) && defined(RT_ARCH_X86)
132typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
133# define FNIEMOP_DEF(a_Name) \
134 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
135# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
136 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
137# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
138 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
139
140#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
141typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
142# define FNIEMOP_DEF(a_Name) \
143 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
144# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
145 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
146# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
147 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
148
149#elif defined(__GNUC__)
150typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
151# define FNIEMOP_DEF(a_Name) \
152 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
153# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
154 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
155# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
156 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
157
158#else
159typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
160# define FNIEMOP_DEF(a_Name) \
161 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
162# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
163 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
164# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
165 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
166
167#endif
168
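/*
 * Usage sketch (illustrative only, not part of the original source): an opcode
 * decoder is defined through FNIEMOP_DEF and dispatched through FNIEMOP_CALL so
 * the implicit pIemCpu argument and the calling convention live in one place.
 * The decoder name below is hypothetical.
 *
 *      FNIEMOP_DEF(iemOp_ExampleNop)
 *      {
 *          // a real decoder would advance RIP, fetch operands, etc.
 *          return VINF_SUCCESS;
 *      }
 *      ...
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */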
169
170/**
171 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
172 */
173typedef union IEMSELDESC
174{
175 /** The legacy view. */
176 X86DESC Legacy;
177 /** The long mode view. */
178 X86DESC64 Long;
179} IEMSELDESC;
180/** Pointer to a selector descriptor table entry. */
181typedef IEMSELDESC *PIEMSELDESC;
182
183
184/*******************************************************************************
185* Defined Constants And Macros *
186*******************************************************************************/
187/** @name IEM status codes.
188 *
189 * Not quite sure how this will play out in the end, just aliasing safe status
190 * codes for now.
191 *
192 * @{ */
193#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
194/** @} */
195
196/** Temporary hack to disable the double execution. Will be removed in favor
197 * of a dedicated execution mode in EM. */
198//#define IEM_VERIFICATION_MODE_NO_REM
199
200/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
201 * due to GCC lacking knowledge about the value range of a switch. */
202#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
203
204/**
205 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
206 * occasion.
207 */
208#ifdef LOG_ENABLED
209# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
210 do { \
211 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
212 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
213 } while (0)
214#else
215# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
216 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
217#endif
218
219/**
220 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
221 * occasion using the supplied logger statement.
222 *
223 * @param a_LoggerArgs What to log on failure.
224 */
225#ifdef LOG_ENABLED
226# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
227 do { \
228 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
229 /*LogFunc(a_LoggerArgs);*/ \
230 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
231 } while (0)
232#else
233# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
234 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
235#endif
236
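/*
 * Usage sketch (illustrative only, not part of the original source): an
 * instruction aspect that IEM does not handle yet typically bails out through
 * one of the macros above, e.g.
 *
 *      if (fSomeUnsupportedCase)   // hypothetical condition
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unsupported operand encoding\n"));
 *
 * so the caller can fall back to another execution engine.
 */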
237/**
238 * Call an opcode decoder function.
239 *
240 * We're using macros for this so that adding and removing parameters can be
241 * done as we please. See FNIEMOP_DEF.
242 */
243#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
244
245/**
246 * Call a common opcode decoder function taking one extra argument.
247 *
248 * We're using macros for this so that adding and removing parameters can be
249 * done as we please. See FNIEMOP_DEF_1.
250 */
251#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
252
253/**
254 * Call a common opcode decoder function taking two extra arguments.
255 *
256 * We're using macros for this so that adding and removing parameters can be
257 * done as we please. See FNIEMOP_DEF_2.
258 */
259#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
260
261/**
262 * Check if we're currently executing in real or virtual 8086 mode.
263 *
264 * @returns @c true if it is, @c false if not.
265 * @param a_pIemCpu The IEM state of the current CPU.
266 */
267#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
268
269/**
270 * Check if we're currently executing in long mode.
271 *
272 * @returns @c true if it is, @c false if not.
273 * @param a_pIemCpu The IEM state of the current CPU.
274 */
275#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
276
277/**
278 * Check if we're currently executing in real mode.
279 *
280 * @returns @c true if it is, @c false if not.
281 * @param a_pIemCpu The IEM state of the current CPU.
282 */
283#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
284
285/**
286 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
287 */
288#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
289
290/**
291 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
292 */
293#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
294
295/**
296 * Tests if at least one of the specified AMD CPUID features (extended) is
297 * marked present.
298 */
299#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
300
301/**
302 * Checks if an Intel CPUID feature is present.
303 */
304#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
305 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
306 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
307
308/**
309 * Checks if an Intel CPUID feature is present.
310 */
311#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
312 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
313
314/**
315 * Checks if an Intel CPUID feature is present in the host CPU.
316 */
317#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
318 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
319
320/**
321 * Evaluates to true if we're presenting an Intel CPU to the guest.
322 */
323#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
324
325/**
326 * Evaluates to true if we're presenting an AMD CPU to the guest.
327 */
328#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
329
330/**
331 * Check if the address is canonical.
332 */
333#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
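/* Note (not part of the original source): the single unsigned compare above
   works because adding 2^47 maps the two canonical ranges [0, 2^47) and
   [2^64 - 2^47, 2^64) onto the contiguous range [0, 2^48), leaving every
   non-canonical address outside it. */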
334
335
336/*******************************************************************************
337* Global Variables *
338*******************************************************************************/
339extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
340
341
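/* Note (not part of the original source): each binary-operator table below
   pairs the plain and the LOCK-prefixed implementation for 8, 16, 32 and
   64-bit operands, in that order; a NULL locked entry means the instruction
   has no LOCK-prefixed form (e.g. CMP, TEST and BT never write their
   destination). */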
342/** Function table for the ADD instruction. */
343static const IEMOPBINSIZES g_iemAImpl_add =
344{
345 iemAImpl_add_u8, iemAImpl_add_u8_locked,
346 iemAImpl_add_u16, iemAImpl_add_u16_locked,
347 iemAImpl_add_u32, iemAImpl_add_u32_locked,
348 iemAImpl_add_u64, iemAImpl_add_u64_locked
349};
350
351/** Function table for the ADC instruction. */
352static const IEMOPBINSIZES g_iemAImpl_adc =
353{
354 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
355 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
356 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
357 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
358};
359
360/** Function table for the SUB instruction. */
361static const IEMOPBINSIZES g_iemAImpl_sub =
362{
363 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
364 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
365 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
366 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
367};
368
369/** Function table for the SBB instruction. */
370static const IEMOPBINSIZES g_iemAImpl_sbb =
371{
372 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
373 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
374 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
375 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
376};
377
378/** Function table for the OR instruction. */
379static const IEMOPBINSIZES g_iemAImpl_or =
380{
381 iemAImpl_or_u8, iemAImpl_or_u8_locked,
382 iemAImpl_or_u16, iemAImpl_or_u16_locked,
383 iemAImpl_or_u32, iemAImpl_or_u32_locked,
384 iemAImpl_or_u64, iemAImpl_or_u64_locked
385};
386
387/** Function table for the XOR instruction. */
388static const IEMOPBINSIZES g_iemAImpl_xor =
389{
390 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
391 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
392 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
393 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
394};
395
396/** Function table for the AND instruction. */
397static const IEMOPBINSIZES g_iemAImpl_and =
398{
399 iemAImpl_and_u8, iemAImpl_and_u8_locked,
400 iemAImpl_and_u16, iemAImpl_and_u16_locked,
401 iemAImpl_and_u32, iemAImpl_and_u32_locked,
402 iemAImpl_and_u64, iemAImpl_and_u64_locked
403};
404
405/** Function table for the CMP instruction.
406 * @remarks Making operand order ASSUMPTIONS.
407 */
408static const IEMOPBINSIZES g_iemAImpl_cmp =
409{
410 iemAImpl_cmp_u8, NULL,
411 iemAImpl_cmp_u16, NULL,
412 iemAImpl_cmp_u32, NULL,
413 iemAImpl_cmp_u64, NULL
414};
415
416/** Function table for the TEST instruction.
417 * @remarks Making operand order ASSUMPTIONS.
418 */
419static const IEMOPBINSIZES g_iemAImpl_test =
420{
421 iemAImpl_test_u8, NULL,
422 iemAImpl_test_u16, NULL,
423 iemAImpl_test_u32, NULL,
424 iemAImpl_test_u64, NULL
425};
426
427/** Function table for the BT instruction. */
428static const IEMOPBINSIZES g_iemAImpl_bt =
429{
430 NULL, NULL,
431 iemAImpl_bt_u16, NULL,
432 iemAImpl_bt_u32, NULL,
433 iemAImpl_bt_u64, NULL
434};
435
436/** Function table for the BTC instruction. */
437static const IEMOPBINSIZES g_iemAImpl_btc =
438{
439 NULL, NULL,
440 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
441 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
442 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
443};
444
445/** Function table for the BTR instruction. */
446static const IEMOPBINSIZES g_iemAImpl_btr =
447{
448 NULL, NULL,
449 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
450 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
451 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
452};
453
454/** Function table for the BTS instruction. */
455static const IEMOPBINSIZES g_iemAImpl_bts =
456{
457 NULL, NULL,
458 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
459 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
460 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
461};
462
463/** Function table for the BSF instruction. */
464static const IEMOPBINSIZES g_iemAImpl_bsf =
465{
466 NULL, NULL,
467 iemAImpl_bsf_u16, NULL,
468 iemAImpl_bsf_u32, NULL,
469 iemAImpl_bsf_u64, NULL
470};
471
472/** Function table for the BSR instruction. */
473static const IEMOPBINSIZES g_iemAImpl_bsr =
474{
475 NULL, NULL,
476 iemAImpl_bsr_u16, NULL,
477 iemAImpl_bsr_u32, NULL,
478 iemAImpl_bsr_u64, NULL
479};
480
481/** Function table for the IMUL instruction. */
482static const IEMOPBINSIZES g_iemAImpl_imul_two =
483{
484 NULL, NULL,
485 iemAImpl_imul_two_u16, NULL,
486 iemAImpl_imul_two_u32, NULL,
487 iemAImpl_imul_two_u64, NULL
488};
489
490/** Group 1 /r lookup table. */
491static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
492{
493 &g_iemAImpl_add,
494 &g_iemAImpl_or,
495 &g_iemAImpl_adc,
496 &g_iemAImpl_sbb,
497 &g_iemAImpl_and,
498 &g_iemAImpl_sub,
499 &g_iemAImpl_xor,
500 &g_iemAImpl_cmp
501};
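/* Illustrative sketch (not part of the original source): the table above is
   indexed by the ModR/M reg field of the group-1 opcodes (0x80..0x83), which
   encodes /0=ADD /1=OR /2=ADC /3=SBB /4=AND /5=SUB /6=XOR /7=CMP, e.g.:

        PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */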
502
503/** Function table for the INC instruction. */
504static const IEMOPUNARYSIZES g_iemAImpl_inc =
505{
506 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
507 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
508 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
509 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
510};
511
512/** Function table for the DEC instruction. */
513static const IEMOPUNARYSIZES g_iemAImpl_dec =
514{
515 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
516 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
517 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
518 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
519};
520
521/** Function table for the NEG instruction. */
522static const IEMOPUNARYSIZES g_iemAImpl_neg =
523{
524 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
525 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
526 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
527 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
528};
529
530/** Function table for the NOT instruction. */
531static const IEMOPUNARYSIZES g_iemAImpl_not =
532{
533 iemAImpl_not_u8, iemAImpl_not_u8_locked,
534 iemAImpl_not_u16, iemAImpl_not_u16_locked,
535 iemAImpl_not_u32, iemAImpl_not_u32_locked,
536 iemAImpl_not_u64, iemAImpl_not_u64_locked
537};
538
539
540/** Function table for the ROL instruction. */
541static const IEMOPSHIFTSIZES g_iemAImpl_rol =
542{
543 iemAImpl_rol_u8,
544 iemAImpl_rol_u16,
545 iemAImpl_rol_u32,
546 iemAImpl_rol_u64
547};
548
549/** Function table for the ROR instruction. */
550static const IEMOPSHIFTSIZES g_iemAImpl_ror =
551{
552 iemAImpl_ror_u8,
553 iemAImpl_ror_u16,
554 iemAImpl_ror_u32,
555 iemAImpl_ror_u64
556};
557
558/** Function table for the RCL instruction. */
559static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
560{
561 iemAImpl_rcl_u8,
562 iemAImpl_rcl_u16,
563 iemAImpl_rcl_u32,
564 iemAImpl_rcl_u64
565};
566
567/** Function table for the RCR instruction. */
568static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
569{
570 iemAImpl_rcr_u8,
571 iemAImpl_rcr_u16,
572 iemAImpl_rcr_u32,
573 iemAImpl_rcr_u64
574};
575
576/** Function table for the SHL instruction. */
577static const IEMOPSHIFTSIZES g_iemAImpl_shl =
578{
579 iemAImpl_shl_u8,
580 iemAImpl_shl_u16,
581 iemAImpl_shl_u32,
582 iemAImpl_shl_u64
583};
584
585/** Function table for the SHR instruction. */
586static const IEMOPSHIFTSIZES g_iemAImpl_shr =
587{
588 iemAImpl_shr_u8,
589 iemAImpl_shr_u16,
590 iemAImpl_shr_u32,
591 iemAImpl_shr_u64
592};
593
594/** Function table for the SAR instruction. */
595static const IEMOPSHIFTSIZES g_iemAImpl_sar =
596{
597 iemAImpl_sar_u8,
598 iemAImpl_sar_u16,
599 iemAImpl_sar_u32,
600 iemAImpl_sar_u64
601};
602
603
604/** Function table for the MUL instruction. */
605static const IEMOPMULDIVSIZES g_iemAImpl_mul =
606{
607 iemAImpl_mul_u8,
608 iemAImpl_mul_u16,
609 iemAImpl_mul_u32,
610 iemAImpl_mul_u64
611};
612
613/** Function table for the IMUL instruction working implicitly on rAX. */
614static const IEMOPMULDIVSIZES g_iemAImpl_imul =
615{
616 iemAImpl_imul_u8,
617 iemAImpl_imul_u16,
618 iemAImpl_imul_u32,
619 iemAImpl_imul_u64
620};
621
622/** Function table for the DIV instruction. */
623static const IEMOPMULDIVSIZES g_iemAImpl_div =
624{
625 iemAImpl_div_u8,
626 iemAImpl_div_u16,
627 iemAImpl_div_u32,
628 iemAImpl_div_u64
629};
630
631/** Function table for the IDIV instruction. */
632static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
633{
634 iemAImpl_idiv_u8,
635 iemAImpl_idiv_u16,
636 iemAImpl_idiv_u32,
637 iemAImpl_idiv_u64
638};
639
640/** Function table for the SHLD instruction */
641static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
642{
643 iemAImpl_shld_u16,
644 iemAImpl_shld_u32,
645 iemAImpl_shld_u64,
646};
647
648/** Function table for the SHRD instruction */
649static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
650{
651 iemAImpl_shrd_u16,
652 iemAImpl_shrd_u32,
653 iemAImpl_shrd_u64,
654};
655
656
657/** Function table for the PUNPCKLBW instruction */
658static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
659/** Function table for the PUNPCKLWD instruction */
660static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
661/** Function table for the PUNPCKLDQ instruction */
662static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
663/** Function table for the PUNPCKLQDQ instruction */
664static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
665
666/** Function table for the PUNPCKHBW instruction */
667static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
668/** Function table for the PUNPCKHBD instruction */
669static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
670/** Function table for the PUNPCKHDQ instruction */
671static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
672/** Function table for the PUNPCKHQDQ instruction */
673static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
674
675/** Function table for the PXOR instruction */
676static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
677/** Function table for the PCMPEQB instruction */
678static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
679/** Function table for the PCMPEQW instruction */
680static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
681/** Function table for the PCMPEQD instruction */
682static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
683
684
685#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
686/** What IEM just wrote. */
687uint8_t g_abIemWrote[256];
688/** How much IEM just wrote. */
689size_t g_cbIemWrote;
690#endif
691
692
693/*******************************************************************************
694* Internal Functions *
695*******************************************************************************/
696static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
697static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
698static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
699/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
700static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
701static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
702static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
703static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
704static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
705static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
706static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
707static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
708static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
709static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
710static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
711static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
712static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
713static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
714static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
715static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
716static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
717static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
718static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
719static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
720static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
721static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
722static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
723static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
724
725#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
726static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
727#endif
728static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
729static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
730
731static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl);
732
733
734/**
735 * Sets the pass up status.
736 *
737 * @returns VINF_SUCCESS.
738 * @param pIemCpu The per CPU IEM state of the calling thread.
739 * @param rcPassUp The pass up status. Must be informational.
740 * VINF_SUCCESS is not allowed.
741 */
742static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
743{
744 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
745
746 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
747 if (rcOldPassUp == VINF_SUCCESS)
748 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
749 /* If both are EM scheduling codes, use EM priority rules. */
750 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
751 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
752 {
753 if (rcPassUp < rcOldPassUp)
754 {
755 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
756 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
757 }
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 }
761 /* Override EM scheduling with specific status code. */
762 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
763 {
764 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
765 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
766 }
767 /* Don't override specific status code, first come first served. */
768 else
769 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
770 return VINF_SUCCESS;
771}
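/* Illustrative sketch (not part of the original source, exact call sites vary):
   a typical caller converts an informational status from a subfunction into a
   pass-up status and then reports plain success upwards:

        if (rcStrict2 != VINF_SUCCESS)
            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict2);
 */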
772
773
774/**
775 * Initializes the execution state.
776 *
777 * @param pIemCpu The per CPU IEM state.
778 * @param fBypassHandlers Whether to bypass access handlers.
779 */
780DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
781{
782 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
783 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
784
785#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
786 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
787 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
788 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
789 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
790 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
791 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
792 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
793 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
794#endif
795
796#ifdef VBOX_WITH_RAW_MODE_NOT_R0
797 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
798#endif
799 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
800 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
801 ? IEMMODE_64BIT
802 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
803 ? IEMMODE_32BIT
804 : IEMMODE_16BIT;
805 pIemCpu->enmCpuMode = enmMode;
806#ifdef VBOX_STRICT
807 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
808 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
809 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
810 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
811 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
812 pIemCpu->uRexReg = 127;
813 pIemCpu->uRexB = 127;
814 pIemCpu->uRexIndex = 127;
815 pIemCpu->iEffSeg = 127;
816 pIemCpu->offOpcode = 127;
817 pIemCpu->cbOpcode = 127;
818#endif
819
820 pIemCpu->cActiveMappings = 0;
821 pIemCpu->iNextMapping = 0;
822 pIemCpu->rcPassUp = VINF_SUCCESS;
823 pIemCpu->fBypassHandlers = fBypassHandlers;
824#ifdef VBOX_WITH_RAW_MODE_NOT_R0
825 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
826 && pCtx->cs.u64Base == 0
827 && pCtx->cs.u32Limit == UINT32_MAX
828 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
829 if (!pIemCpu->fInPatchCode)
830 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
831#endif
832}
833
834
835/**
836 * Initializes the decoder state.
837 *
838 * @param pIemCpu The per CPU IEM state.
839 * @param fBypassHandlers Whether to bypass access handlers.
840 */
841DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
842{
843 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
844 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
845
846#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
855#endif
856
857#ifdef VBOX_WITH_RAW_MODE_NOT_R0
858 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
859#endif
860 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
861#ifdef IEM_VERIFICATION_MODE_FULL
862 if (pIemCpu->uInjectCpl != UINT8_MAX)
863 pIemCpu->uCpl = pIemCpu->uInjectCpl;
864#endif
865 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
866 ? IEMMODE_64BIT
867 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
868 ? IEMMODE_32BIT
869 : IEMMODE_16BIT;
870 pIemCpu->enmCpuMode = enmMode;
871 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
872 pIemCpu->enmEffAddrMode = enmMode;
873 if (enmMode != IEMMODE_64BIT)
874 {
875 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
876 pIemCpu->enmEffOpSize = enmMode;
877 }
878 else
879 {
880 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
881 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
882 }
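 /* Note (not part of the original source): in 64-bit mode the default operand
    size is 32 bits; a REX.W prefix, picked up later during prefix decoding,
    promotes individual instructions to 64-bit operands, which is why the
    default is not IEMMODE_64BIT here. */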
883 pIemCpu->fPrefixes = 0;
884 pIemCpu->uRexReg = 0;
885 pIemCpu->uRexB = 0;
886 pIemCpu->uRexIndex = 0;
887 pIemCpu->iEffSeg = X86_SREG_DS;
888 pIemCpu->offOpcode = 0;
889 pIemCpu->cbOpcode = 0;
890 pIemCpu->cActiveMappings = 0;
891 pIemCpu->iNextMapping = 0;
892 pIemCpu->rcPassUp = VINF_SUCCESS;
893 pIemCpu->fBypassHandlers = fBypassHandlers;
894#ifdef VBOX_WITH_RAW_MODE_NOT_R0
895 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
896 && pCtx->cs.u64Base == 0
897 && pCtx->cs.u32Limit == UINT32_MAX
898 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
899 if (!pIemCpu->fInPatchCode)
900 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
901#endif
902
903#ifdef DBGFTRACE_ENABLED
904 switch (enmMode)
905 {
906 case IEMMODE_64BIT:
907 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
908 break;
909 case IEMMODE_32BIT:
910 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
911 break;
912 case IEMMODE_16BIT:
913 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
914 break;
915 }
916#endif
917}
918
919
920/**
921 * Prefetches opcodes the first time, when starting to execute.
922 *
923 * @returns Strict VBox status code.
924 * @param pIemCpu The IEM state.
925 * @param fBypassHandlers Whether to bypass access handlers.
926 */
927static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
928{
929#ifdef IEM_VERIFICATION_MODE_FULL
930 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
931#endif
932 iemInitDecoder(pIemCpu, fBypassHandlers);
933
934 /*
935 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
936 *
937 * First translate CS:rIP to a physical address.
938 */
939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
940 uint32_t cbToTryRead;
941 RTGCPTR GCPtrPC;
942 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
943 {
944 cbToTryRead = PAGE_SIZE;
945 GCPtrPC = pCtx->rip;
946 if (!IEM_IS_CANONICAL(GCPtrPC))
947 return iemRaiseGeneralProtectionFault0(pIemCpu);
948 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
949 }
950 else
951 {
952 uint32_t GCPtrPC32 = pCtx->eip;
953 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
954 if (GCPtrPC32 > pCtx->cs.u32Limit)
955 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
956 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
957 if (!cbToTryRead) /* overflowed */
958 {
959 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
960 cbToTryRead = UINT32_MAX;
961 }
962 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
963 }
964
965#ifdef VBOX_WITH_RAW_MODE_NOT_R0
966 /* Allow interpretation of patch manager code blocks since they can for
967 instance throw #PFs for perfectly good reasons. */
968 if (pIemCpu->fInPatchCode)
969 {
970 size_t cbRead = 0;
971 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
972 AssertRCReturn(rc, rc);
973 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
974 return VINF_SUCCESS;
975 }
976#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
977
978 RTGCPHYS GCPhys;
979 uint64_t fFlags;
980 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
981 if (RT_FAILURE(rc))
982 {
983 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
984 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
985 }
986 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
987 {
988 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
989 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
990 }
991 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
992 {
993 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
994 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
995 }
996 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
997 /** @todo Check reserved bits and such stuff. PGM is better at doing
998 * that, so do it when implementing the guest virtual address
999 * TLB... */
1000
1001#ifdef IEM_VERIFICATION_MODE_FULL
1002 /*
1003 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1004 * instruction.
1005 */
1006 /** @todo optimize this differently by not using PGMPhysRead. */
1007 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1008 pIemCpu->GCPhysOpcodes = GCPhys;
1009 if ( offPrevOpcodes < cbOldOpcodes
1010 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1011 {
1012 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1013 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1014 pIemCpu->cbOpcode = cbNew;
1015 return VINF_SUCCESS;
1016 }
1017#endif
1018
1019 /*
1020 * Read the bytes at this address.
1021 */
1022 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1026 cbToTryRead = sizeof(pIemCpu->abOpcode);
1027 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
1028 * doing that. */
1029 if (!pIemCpu->fBypassHandlers)
1030 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
1031 else
1032 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
1033 if (rc != VINF_SUCCESS)
1034 {
1035 /** @todo status code handling */
1036 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1037 GCPtrPC, GCPhys, rc, cbToTryRead));
1038 return rc;
1039 }
1040 pIemCpu->cbOpcode = cbToTryRead;
1041
1042 return VINF_SUCCESS;
1043}
1044
1045
1046/**
1047 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1048 * exception if it fails.
1049 *
1050 * @returns Strict VBox status code.
1051 * @param pIemCpu The IEM state.
1052 * @param cbMin The minimum number of bytes relative to offOpcode
1053 * that must be read.
1054 */
1055static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1056{
1057 /*
1058 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1059 *
1060 * First translate CS:rIP to a physical address.
1061 */
1062 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1063 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1064 uint32_t cbToTryRead;
1065 RTGCPTR GCPtrNext;
1066 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1067 {
1068 cbToTryRead = PAGE_SIZE;
1069 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1070 if (!IEM_IS_CANONICAL(GCPtrNext))
1071 return iemRaiseGeneralProtectionFault0(pIemCpu);
1072 }
1073 else
1074 {
1075 uint32_t GCPtrNext32 = pCtx->eip;
1076 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1077 GCPtrNext32 += pIemCpu->cbOpcode;
1078 if (GCPtrNext32 > pCtx->cs.u32Limit)
1079 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1080 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1081 if (!cbToTryRead) /* overflowed */
1082 {
1083 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1084 cbToTryRead = UINT32_MAX;
1085 /** @todo check out wrapping around the code segment. */
1086 }
1087 if (cbToTryRead < cbMin - cbLeft)
1088 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1089 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
1090 }
1091
1092 /* Only read up to the end of the page, and make sure we don't read more
1093 than the opcode buffer can hold. */
1094 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1095 if (cbToTryRead > cbLeftOnPage)
1096 cbToTryRead = cbLeftOnPage;
1097 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1098 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1099 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1100
1101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1102 /* Allow interpretation of patch manager code blocks since they can for
1103 instance throw #PFs for perfectly good reasons. */
1104 if (pIemCpu->fInPatchCode)
1105 {
1106 size_t cbRead = 0;
1107 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1108 AssertRCReturn(rc, rc);
1109 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1110 return VINF_SUCCESS;
1111 }
1112#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1113
1114 RTGCPHYS GCPhys;
1115 uint64_t fFlags;
1116 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1117 if (RT_FAILURE(rc))
1118 {
1119 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1120 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1121 }
1122 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1123 {
1124 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1125 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1126 }
1127 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1128 {
1129 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1130 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1131 }
1132 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1133 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1134 /** @todo Check reserved bits and such stuff. PGM is better at doing
1135 * that, so do it when implementing the guest virtual address
1136 * TLB... */
1137
1138 /*
1139 * Read the bytes at this address.
1140 */
1141 if (!pIemCpu->fBypassHandlers)
1142 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1143 else
1144 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1145 if (rc != VINF_SUCCESS)
1146 {
1147 /** @todo status code handling */
1148 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1149 return rc;
1150 }
1151 pIemCpu->cbOpcode += cbToTryRead;
1152 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1153
1154 return VINF_SUCCESS;
1155}
1156
1157
1158/**
1159 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1160 *
1161 * @returns Strict VBox status code.
1162 * @param pIemCpu The IEM state.
1163 * @param pb Where to return the opcode byte.
1164 */
1165DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1166{
1167 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1168 if (rcStrict == VINF_SUCCESS)
1169 {
1170 uint8_t offOpcode = pIemCpu->offOpcode;
1171 *pb = pIemCpu->abOpcode[offOpcode];
1172 pIemCpu->offOpcode = offOpcode + 1;
1173 }
1174 else
1175 *pb = 0;
1176 return rcStrict;
1177}
1178
1179
1180/**
1181 * Fetches the next opcode byte.
1182 *
1183 * @returns Strict VBox status code.
1184 * @param pIemCpu The IEM state.
1185 * @param pu8 Where to return the opcode byte.
1186 */
1187DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1188{
1189 uint8_t const offOpcode = pIemCpu->offOpcode;
1190 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1191 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1192
1193 *pu8 = pIemCpu->abOpcode[offOpcode];
1194 pIemCpu->offOpcode = offOpcode + 1;
1195 return VINF_SUCCESS;
1196}
1197
1198
1199/**
1200 * Fetches the next opcode byte, returns automatically on failure.
1201 *
1202 * @param a_pu8 Where to return the opcode byte.
1203 * @remark Implicitly references pIemCpu.
1204 */
1205#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1206 do \
1207 { \
1208 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1209 if (rcStrict2 != VINF_SUCCESS) \
1210 return rcStrict2; \
1211 } while (0)
1212
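/*
 * Usage sketch (illustrative only, not part of the original source): decoder
 * functions fetch ModR/M bytes and immediates through these macros so that a
 * failed fetch propagates its strict status code to the caller immediately:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 */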
1213
1214/**
1215 * Fetches the next signed byte from the opcode stream.
1216 *
1217 * @returns Strict VBox status code.
1218 * @param pIemCpu The IEM state.
1219 * @param pi8 Where to return the signed byte.
1220 */
1221DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1222{
1223 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1224}
1225
1226
1227/**
1228 * Fetches the next signed byte from the opcode stream, returning automatically
1229 * on failure.
1230 *
1231 * @param pi8 Where to return the signed byte.
1232 * @remark Implicitly references pIemCpu.
1233 */
1234#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1235 do \
1236 { \
1237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1238 if (rcStrict2 != VINF_SUCCESS) \
1239 return rcStrict2; \
1240 } while (0)
1241
1242
1243/**
1244 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1245 *
1246 * @returns Strict VBox status code.
1247 * @param pIemCpu The IEM state.
1248 * @param pu16 Where to return the opcode word.
1249 */
1250DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1251{
1252 uint8_t u8;
1253 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1254 if (rcStrict == VINF_SUCCESS)
1255 *pu16 = (int8_t)u8;
1256 return rcStrict;
1257}
1258
1259
1260/**
1261 * Fetches the next signed byte from the opcode stream, extending it to
1262 * unsigned 16-bit.
1263 *
1264 * @returns Strict VBox status code.
1265 * @param pIemCpu The IEM state.
1266 * @param pu16 Where to return the unsigned word.
1267 */
1268DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1269{
1270 uint8_t const offOpcode = pIemCpu->offOpcode;
1271 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1272 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1273
1274 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1275 pIemCpu->offOpcode = offOpcode + 1;
1276 return VINF_SUCCESS;
1277}
1278
1279
1280/**
1281 * Fetches the next signed byte from the opcode stream and sign-extends it to
1282 * a word, returning automatically on failure.
1283 *
1284 * @param pu16 Where to return the word.
1285 * @remark Implicitly references pIemCpu.
1286 */
1287#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1288 do \
1289 { \
1290 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1291 if (rcStrict2 != VINF_SUCCESS) \
1292 return rcStrict2; \
1293 } while (0)
1294
1295
1296/**
1297 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1298 *
1299 * @returns Strict VBox status code.
1300 * @param pIemCpu The IEM state.
1301 * @param pu32 Where to return the opcode dword.
1302 */
1303DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1304{
1305 uint8_t u8;
1306 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1307 if (rcStrict == VINF_SUCCESS)
1308 *pu32 = (int8_t)u8;
1309 return rcStrict;
1310}
1311
1312
1313/**
1314 * Fetches the next signed byte from the opcode stream, extending it to
1315 * unsigned 32-bit.
1316 *
1317 * @returns Strict VBox status code.
1318 * @param pIemCpu The IEM state.
1319 * @param pu32 Where to return the unsigned dword.
1320 */
1321DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1322{
1323 uint8_t const offOpcode = pIemCpu->offOpcode;
1324 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1325 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1326
1327 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1328 pIemCpu->offOpcode = offOpcode + 1;
1329 return VINF_SUCCESS;
1330}
1331
1332
1333/**
1334 * Fetches the next signed byte from the opcode stream and sign-extends it to
1335 * a double word, returning automatically on failure.
1336 *
1337 * @param pu32 Where to return the double word.
1338 * @remark Implicitly references pIemCpu.
1339 */
1340#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1341 do \
1342 { \
1343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1344 if (rcStrict2 != VINF_SUCCESS) \
1345 return rcStrict2; \
1346 } while (0)
1347
1348
1349/**
1350 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1351 *
1352 * @returns Strict VBox status code.
1353 * @param pIemCpu The IEM state.
1354 * @param pu64 Where to return the opcode qword.
1355 */
1356DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1357{
1358 uint8_t u8;
1359 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1360 if (rcStrict == VINF_SUCCESS)
1361 *pu64 = (int8_t)u8;
1362 return rcStrict;
1363}
1364
1365
1366/**
1367 * Fetches the next signed byte from the opcode stream, extending it to
1368 * unsigned 64-bit.
1369 *
1370 * @returns Strict VBox status code.
1371 * @param pIemCpu The IEM state.
1372 * @param pu64 Where to return the unsigned qword.
1373 */
1374DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1375{
1376 uint8_t const offOpcode = pIemCpu->offOpcode;
1377 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1378 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1379
1380 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1381 pIemCpu->offOpcode = offOpcode + 1;
1382 return VINF_SUCCESS;
1383}
1384
1385
1386/**
1387 * Fetches the next signed byte from the opcode stream and sign-extends it to
1388 * a quad word, returning automatically on failure.
1389 *
1390 * @param pu64 Where to return the quad word.
1391 * @remark Implicitly references pIemCpu.
1392 */
1393#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1394 do \
1395 { \
1396 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1397 if (rcStrict2 != VINF_SUCCESS) \
1398 return rcStrict2; \
1399 } while (0)
1400
1401
1402/**
1403 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1404 *
1405 * @returns Strict VBox status code.
1406 * @param pIemCpu The IEM state.
1407 * @param pu16 Where to return the opcode word.
1408 */
1409DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1410{
1411 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1412 if (rcStrict == VINF_SUCCESS)
1413 {
1414 uint8_t offOpcode = pIemCpu->offOpcode;
1415 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1416 pIemCpu->offOpcode = offOpcode + 2;
1417 }
1418 else
1419 *pu16 = 0;
1420 return rcStrict;
1421}
1422
1423
1424/**
1425 * Fetches the next opcode word.
1426 *
1427 * @returns Strict VBox status code.
1428 * @param pIemCpu The IEM state.
1429 * @param pu16 Where to return the opcode word.
1430 */
1431DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1432{
1433 uint8_t const offOpcode = pIemCpu->offOpcode;
1434 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1435 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1436
1437 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1438 pIemCpu->offOpcode = offOpcode + 2;
1439 return VINF_SUCCESS;
1440}
1441
1442
1443/**
1444 * Fetches the next opcode word, returns automatically on failure.
1445 *
1446 * @param a_pu16 Where to return the opcode word.
1447 * @remark Implicitly references pIemCpu.
1448 */
1449#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1450 do \
1451 { \
1452 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1453 if (rcStrict2 != VINF_SUCCESS) \
1454 return rcStrict2; \
1455 } while (0)
1456
1457
1458/**
1459 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pIemCpu The IEM state.
1463 * @param pu32 Where to return the opcode double word.
1464 */
1465DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1466{
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pIemCpu->offOpcode;
1471 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1472 pIemCpu->offOpcode = offOpcode + 2;
1473 }
1474 else
1475 *pu32 = 0;
1476 return rcStrict;
1477}
1478
1479
1480/**
1481 * Fetches the next opcode word, zero extending it to a double word.
1482 *
1483 * @returns Strict VBox status code.
1484 * @param pIemCpu The IEM state.
1485 * @param pu32 Where to return the opcode double word.
1486 */
1487DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1488{
1489 uint8_t const offOpcode = pIemCpu->offOpcode;
1490 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1491 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1492
1493 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1494 pIemCpu->offOpcode = offOpcode + 2;
1495 return VINF_SUCCESS;
1496}
1497
1498
1499/**
1500 * Fetches the next opcode word and zero extends it to a double word, returns
1501 * automatically on failure.
1502 *
1503 * @param a_pu32 Where to return the opcode double word.
1504 * @remark Implicitly references pIemCpu.
1505 */
1506#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1507 do \
1508 { \
1509 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1510 if (rcStrict2 != VINF_SUCCESS) \
1511 return rcStrict2; \
1512 } while (0)
1513
1514
1515/**
1516 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1517 *
1518 * @returns Strict VBox status code.
1519 * @param pIemCpu The IEM state.
1520 * @param pu64 Where to return the opcode quad word.
1521 */
1522DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1523{
1524 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1525 if (rcStrict == VINF_SUCCESS)
1526 {
1527 uint8_t offOpcode = pIemCpu->offOpcode;
1528 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1529 pIemCpu->offOpcode = offOpcode + 2;
1530 }
1531 else
1532 *pu64 = 0;
1533 return rcStrict;
1534}
1535
1536
1537/**
1538 * Fetches the next opcode word, zero extending it to a quad word.
1539 *
1540 * @returns Strict VBox status code.
1541 * @param pIemCpu The IEM state.
1542 * @param pu64 Where to return the opcode quad word.
1543 */
1544DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1545{
1546 uint8_t const offOpcode = pIemCpu->offOpcode;
1547 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1548 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1549
1550 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1551 pIemCpu->offOpcode = offOpcode + 2;
1552 return VINF_SUCCESS;
1553}
1554
1555
1556/**
1557 * Fetches the next opcode word and zero extends it to a quad word, returns
1558 * automatically on failure.
1559 *
1560 * @param a_pu64 Where to return the opcode quad word.
1561 * @remark Implicitly references pIemCpu.
1562 */
1563#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1564 do \
1565 { \
1566 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1567 if (rcStrict2 != VINF_SUCCESS) \
1568 return rcStrict2; \
1569 } while (0)
1570
1571
1572/**
1573 * Fetches the next signed word from the opcode stream.
1574 *
1575 * @returns Strict VBox status code.
1576 * @param pIemCpu The IEM state.
1577 * @param pi16 Where to return the signed word.
1578 */
1579DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1580{
1581 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1582}
1583
1584
1585/**
1586 * Fetches the next signed word from the opcode stream, returning automatically
1587 * on failure.
1588 *
1589 * @param a_pi16 Where to return the signed word.
1590 * @remark Implicitly references pIemCpu.
1591 */
1592#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1593 do \
1594 { \
1595 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1596 if (rcStrict2 != VINF_SUCCESS) \
1597 return rcStrict2; \
1598 } while (0)
1599
1600
1601/**
1602 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1603 *
1604 * @returns Strict VBox status code.
1605 * @param pIemCpu The IEM state.
1606 * @param pu32 Where to return the opcode dword.
1607 */
1608DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1609{
1610 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1611 if (rcStrict == VINF_SUCCESS)
1612 {
1613 uint8_t offOpcode = pIemCpu->offOpcode;
1614 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1615 pIemCpu->abOpcode[offOpcode + 1],
1616 pIemCpu->abOpcode[offOpcode + 2],
1617 pIemCpu->abOpcode[offOpcode + 3]);
1618 pIemCpu->offOpcode = offOpcode + 4;
1619 }
1620 else
1621 *pu32 = 0;
1622 return rcStrict;
1623}
1624
1625
1626/**
1627 * Fetches the next opcode dword.
1628 *
1629 * @returns Strict VBox status code.
1630 * @param pIemCpu The IEM state.
1631 * @param pu32 Where to return the opcode double word.
1632 */
1633DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1634{
1635 uint8_t const offOpcode = pIemCpu->offOpcode;
1636 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1637 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1638
1639 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1640 pIemCpu->abOpcode[offOpcode + 1],
1641 pIemCpu->abOpcode[offOpcode + 2],
1642 pIemCpu->abOpcode[offOpcode + 3]);
1643 pIemCpu->offOpcode = offOpcode + 4;
1644 return VINF_SUCCESS;
1645}
1646
1647
1648/**
1649 * Fetches the next opcode dword, returns automatically on failure.
1650 *
1651 * @param a_pu32 Where to return the opcode dword.
1652 * @remark Implicitly references pIemCpu.
1653 */
1654#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1655 do \
1656 { \
1657 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1658 if (rcStrict2 != VINF_SUCCESS) \
1659 return rcStrict2; \
1660 } while (0)
1661
1662
1663/**
1664 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1665 *
1666 * @returns Strict VBox status code.
1667 * @param pIemCpu The IEM state.
1668 * @param pu64 Where to return the opcode quad word.
1669 */
1670DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1671{
1672 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1673 if (rcStrict == VINF_SUCCESS)
1674 {
1675 uint8_t offOpcode = pIemCpu->offOpcode;
1676 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1677 pIemCpu->abOpcode[offOpcode + 1],
1678 pIemCpu->abOpcode[offOpcode + 2],
1679 pIemCpu->abOpcode[offOpcode + 3]);
1680 pIemCpu->offOpcode = offOpcode + 4;
1681 }
1682 else
1683 *pu64 = 0;
1684 return rcStrict;
1685}
1686
1687
1688/**
1689 * Fetches the next opcode dword, zero extending it to a quad word.
1690 *
1691 * @returns Strict VBox status code.
1692 * @param pIemCpu The IEM state.
1693 * @param pu64 Where to return the opcode quad word.
1694 */
1695DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1696{
1697 uint8_t const offOpcode = pIemCpu->offOpcode;
1698 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1699 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1700
1701 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1702 pIemCpu->abOpcode[offOpcode + 1],
1703 pIemCpu->abOpcode[offOpcode + 2],
1704 pIemCpu->abOpcode[offOpcode + 3]);
1705 pIemCpu->offOpcode = offOpcode + 4;
1706 return VINF_SUCCESS;
1707}
1708
1709
1710/**
1711 * Fetches the next opcode dword and zero extends it to a quad word, returns
1712 * automatically on failure.
1713 *
1714 * @param a_pu64 Where to return the opcode quad word.
1715 * @remark Implicitly references pIemCpu.
1716 */
1717#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1718 do \
1719 { \
1720 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1721 if (rcStrict2 != VINF_SUCCESS) \
1722 return rcStrict2; \
1723 } while (0)
1724
1725
1726/**
1727 * Fetches the next signed double word from the opcode stream.
1728 *
1729 * @returns Strict VBox status code.
1730 * @param pIemCpu The IEM state.
1731 * @param pi32 Where to return the signed double word.
1732 */
1733DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1734{
1735 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1736}
1737
1738/**
1739 * Fetches the next signed double word from the opcode stream, returning
1740 * automatically on failure.
1741 *
1742 * @param a_pi32 Where to return the signed double word.
1743 * @remark Implicitly references pIemCpu.
1744 */
1745#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1746 do \
1747 { \
1748 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1749 if (rcStrict2 != VINF_SUCCESS) \
1750 return rcStrict2; \
1751 } while (0)
1752
1753
1754/**
1755 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1756 *
1757 * @returns Strict VBox status code.
1758 * @param pIemCpu The IEM state.
1759 * @param pu64 Where to return the opcode qword.
1760 */
1761DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1762{
1763 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1764 if (rcStrict == VINF_SUCCESS)
1765 {
1766 uint8_t offOpcode = pIemCpu->offOpcode;
1767 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1768 pIemCpu->abOpcode[offOpcode + 1],
1769 pIemCpu->abOpcode[offOpcode + 2],
1770 pIemCpu->abOpcode[offOpcode + 3]);
1771 pIemCpu->offOpcode = offOpcode + 4;
1772 }
1773 else
1774 *pu64 = 0;
1775 return rcStrict;
1776}
1777
1778
1779/**
1780 * Fetches the next opcode dword, sign extending it into a quad word.
1781 *
1782 * @returns Strict VBox status code.
1783 * @param pIemCpu The IEM state.
1784 * @param pu64 Where to return the opcode quad word.
1785 */
1786DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1787{
1788 uint8_t const offOpcode = pIemCpu->offOpcode;
1789 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1790 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1791
1792 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1793 pIemCpu->abOpcode[offOpcode + 1],
1794 pIemCpu->abOpcode[offOpcode + 2],
1795 pIemCpu->abOpcode[offOpcode + 3]);
1796 *pu64 = i32;
1797 pIemCpu->offOpcode = offOpcode + 4;
1798 return VINF_SUCCESS;
1799}
1800
1801
1802/**
1803 * Fetches the next opcode double word and sign extends it to a quad word,
1804 * returns automatically on failure.
1805 *
1806 * @param a_pu64 Where to return the opcode quad word.
1807 * @remark Implicitly references pIemCpu.
1808 */
1809#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1810 do \
1811 { \
1812 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1813 if (rcStrict2 != VINF_SUCCESS) \
1814 return rcStrict2; \
1815 } while (0)
1816
1817
1818/**
1819 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1820 *
1821 * @returns Strict VBox status code.
1822 * @param pIemCpu The IEM state.
1823 * @param pu64 Where to return the opcode qword.
1824 */
1825DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1826{
1827 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1828 if (rcStrict == VINF_SUCCESS)
1829 {
1830 uint8_t offOpcode = pIemCpu->offOpcode;
1831 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1832 pIemCpu->abOpcode[offOpcode + 1],
1833 pIemCpu->abOpcode[offOpcode + 2],
1834 pIemCpu->abOpcode[offOpcode + 3],
1835 pIemCpu->abOpcode[offOpcode + 4],
1836 pIemCpu->abOpcode[offOpcode + 5],
1837 pIemCpu->abOpcode[offOpcode + 6],
1838 pIemCpu->abOpcode[offOpcode + 7]);
1839 pIemCpu->offOpcode = offOpcode + 8;
1840 }
1841 else
1842 *pu64 = 0;
1843 return rcStrict;
1844}
1845
1846
1847/**
1848 * Fetches the next opcode qword.
1849 *
1850 * @returns Strict VBox status code.
1851 * @param pIemCpu The IEM state.
1852 * @param pu64 Where to return the opcode qword.
1853 */
1854DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1855{
1856 uint8_t const offOpcode = pIemCpu->offOpcode;
1857 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1858 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1859
1860 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1861 pIemCpu->abOpcode[offOpcode + 1],
1862 pIemCpu->abOpcode[offOpcode + 2],
1863 pIemCpu->abOpcode[offOpcode + 3],
1864 pIemCpu->abOpcode[offOpcode + 4],
1865 pIemCpu->abOpcode[offOpcode + 5],
1866 pIemCpu->abOpcode[offOpcode + 6],
1867 pIemCpu->abOpcode[offOpcode + 7]);
1868 pIemCpu->offOpcode = offOpcode + 8;
1869 return VINF_SUCCESS;
1870}
1871
1872
1873/**
1874 * Fetches the next opcode quad word, returns automatically on failure.
1875 *
1876 * @param a_pu64 Where to return the opcode quad word.
1877 * @remark Implicitly references pIemCpu.
1878 */
1879#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1880 do \
1881 { \
1882 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1883 if (rcStrict2 != VINF_SUCCESS) \
1884 return rcStrict2; \
1885 } while (0)
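/*
 * Illustrative note (not part of the decoder proper): the IEM_OPCODE_GET_NEXT_XXX
 * macros above are meant to be used in functions that return a strict status
 * code and have pIemCpu in scope, so a failed opcode fetch propagates straight
 * to the caller.  A minimal, hypothetical sketch:
 *
 *      static VBOXSTRICTRC iemExampleFetchImm32(PIEMCPU pIemCpu, uint32_t *pu32Imm)
 *      {
 *          IEM_OPCODE_GET_NEXT_U32(pu32Imm);   // returns the strict rc on a fetch failure
 *          return VINF_SUCCESS;                // *pu32Imm now holds the immediate
 *      }
 */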
1886
1887
1888/** @name Misc Worker Functions.
1889 * @{
1890 */
1891
1892
1893/**
1894 * Validates a new SS segment.
1895 *
1896 * @returns VBox strict status code.
1897 * @param pIemCpu The IEM per CPU instance data.
1898 * @param pCtx The CPU context.
1899 * @param NewSS The new SS selector.
1900 * @param uCpl The CPL to load the stack for.
1901 * @param pDesc Where to return the descriptor.
1902 */
1903static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1904{
1905 NOREF(pCtx);
1906
1907 /* Null selectors are not allowed (we're not called for dispatching
1908 interrupts with SS=0 in long mode). */
1909 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1910 {
1911 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1912 return iemRaiseTaskSwitchFault0(pIemCpu);
1913 }
1914
1915 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1916 if ((NewSS & X86_SEL_RPL) != uCpl)
1917 {
1918 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1919 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1920 }
1921
1922 /*
1923 * Read the descriptor.
1924 */
1925 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1926 if (rcStrict != VINF_SUCCESS)
1927 return rcStrict;
1928
1929 /*
1930 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1931 */
1932 if (!pDesc->Legacy.Gen.u1DescType)
1933 {
1934 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1935 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1936 }
1937
1938 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1939 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1940 {
1941 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1942 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1943 }
1944 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1945 {
1946 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1947 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1948 }
1949
1950 /* Is it there? */
1951 /** @todo testcase: Is this checked before the canonical / limit check below? */
1952 if (!pDesc->Legacy.Gen.u1Present)
1953 {
1954 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1955 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1956 }
1957
1958 return VINF_SUCCESS;
1959}
1960
1961
1962/**
1963 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1964 * not.
1965 *
1966 * @param a_pIemCpu The IEM per CPU data.
1967 * @param a_pCtx The CPU context.
1968 */
1969#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1970# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1971 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1972 ? (a_pCtx)->eflags.u \
1973 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1974#else
1975# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1976 ( (a_pCtx)->eflags.u )
1977#endif
1978
1979/**
1980 * Updates the EFLAGS in the correct manner wrt. PATM.
1981 *
1982 * @param a_pIemCpu The IEM per CPU data.
1983 * @param a_pCtx The CPU context.
1984 */
1985#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1986# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1987 do { \
1988 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1989 (a_pCtx)->eflags.u = (a_fEfl); \
1990 else \
1991 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1992 } while (0)
1993#else
1994# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1995 do { \
1996 (a_pCtx)->eflags.u = (a_fEfl); \
1997 } while (0)
1998#endif
1999
2000
2001/** @} */
2002
2003/** @name Raising Exceptions.
2004 *
2005 * @{
2006 */
2007
2008/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2009 * @{ */
2010/** CPU exception. */
2011#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2012/** External interrupt (from PIC, APIC, whatever). */
2013#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2014/** Software interrupt (int or into, not bound).
2015 * Returns to the following instruction. */
2016#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2017/** Takes an error code. */
2018#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2019/** Takes a CR2. */
2020#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2021/** Generated by the breakpoint instruction. */
2022#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2023/** @} */
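/*
 * Example combinations used further down in this file: a CPU exception with an
 * error code (e.g. iemRaiseGeneralProtectionFault) passes
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, a page fault
 * (iemRaisePageFault) additionally sets IEM_XCPT_FLAGS_CR2, while a software
 * INT n uses IEM_XCPT_FLAGS_T_SOFT_INT so that the pushed return address
 * points at the following instruction.
 */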
2024
2025
2026/**
2027 * Loads the specified stack far pointer from the TSS.
2028 *
2029 * @returns VBox strict status code.
2030 * @param pIemCpu The IEM per CPU instance data.
2031 * @param pCtx The CPU context.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param pSelSS Where to return the new stack segment.
2034 * @param puEsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2037 PRTSEL pSelSS, uint32_t *puEsp)
2038{
2039 VBOXSTRICTRC rcStrict;
2040 Assert(uCpl < 4);
2041 *puEsp = 0; /* make gcc happy */
2042 *pSelSS = 0; /* make gcc happy */
2043
2044 switch (pCtx->tr.Attr.n.u4Type)
2045 {
2046 /*
2047 * 16-bit TSS (X86TSS16).
2048 */
2049 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2050 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2051 {
2052 uint32_t off = uCpl * 4 + 2;
2053 if (off + 4 > pCtx->tr.u32Limit)
2054 {
2055 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2056 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2057 }
2058
2059 uint32_t u32Tmp = 0; /* gcc maybe... */
2060 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2061 if (rcStrict == VINF_SUCCESS)
2062 {
2063 *puEsp = RT_LOWORD(u32Tmp);
2064 *pSelSS = RT_HIWORD(u32Tmp);
2065 return VINF_SUCCESS;
2066 }
2067 break;
2068 }
2069
2070 /*
2071 * 32-bit TSS (X86TSS32).
2072 */
2073 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2074 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2075 {
2076 uint32_t off = uCpl * 8 + 4;
2077 if (off + 7 > pCtx->tr.u32Limit)
2078 {
2079 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2080 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2081 }
2082
2083 uint64_t u64Tmp;
2084 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2085 if (rcStrict == VINF_SUCCESS)
2086 {
2087 *puEsp = u64Tmp & UINT32_MAX;
2088 *pSelSS = (RTSEL)(u64Tmp >> 32);
2089 return VINF_SUCCESS;
2090 }
2091 break;
2092 }
2093
2094 default:
2095 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2096 }
2097 return rcStrict;
2098}
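/*
 * Worked example for the offset math above (per the standard x86 TSS layouts):
 * in a 16-bit TSS the sp0:ss0 pair starts at offset 2, so uCpl=1 gives
 * off = 1*4 + 2 = 6, i.e. sp1:ss1 read as a single 32-bit value.  In a 32-bit
 * TSS the esp0 field starts at offset 4, so uCpl=1 gives off = 1*8 + 4 = 12,
 * i.e. esp1 followed by ss1, read as a single 64-bit value and split into
 * ESP and SS.
 */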
2099
2100
2101/**
2102 * Loads the specified stack pointer from the 64-bit TSS.
2103 *
2104 * @returns VBox strict status code.
2105 * @param pIemCpu The IEM per CPU instance data.
2106 * @param pCtx The CPU context.
2107 * @param uCpl The CPL to load the stack for.
2108 * @param uIst The interrupt stack table index, or 0 to use the stack for uCpl.
2109 * @param puRsp Where to return the new stack pointer.
2110 */
2111static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2112 uint64_t *puRsp)
2113{
2114 Assert(uCpl < 4);
2115 Assert(uIst < 8);
2116 *puRsp = 0; /* make gcc happy */
2117
2118 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2119
2120 uint32_t off;
2121 if (uIst)
2122 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2123 else
2124 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2125 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2126 {
2127 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2128 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2129 }
2130
2131 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2132}
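/*
 * Worked example for the 64-bit TSS lookup above: with uIst=0 and uCpl=2 the
 * offset is 2*8 + RT_OFFSETOF(X86TSS64, rsp0) = 20, i.e. the rsp2 field; with
 * uIst=3 it is (3-1)*8 + RT_OFFSETOF(X86TSS64, ist1) = 52, i.e. the ist3 field.
 */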
2133
2134
2135/**
2136 * Adjust the CPU state according to the exception being raised.
2137 *
2138 * @param pCtx The CPU context.
2139 * @param u8Vector The exception that has been raised.
2140 */
2141DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2142{
2143 switch (u8Vector)
2144 {
2145 case X86_XCPT_DB:
2146 pCtx->dr[7] &= ~X86_DR7_GD;
2147 break;
2148 /** @todo Read the AMD and Intel exception reference... */
2149 }
2150}
2151
2152
2153/**
2154 * Implements exceptions and interrupts for real mode.
2155 *
2156 * @returns VBox strict status code.
2157 * @param pIemCpu The IEM per CPU instance data.
2158 * @param pCtx The CPU context.
2159 * @param cbInstr The number of bytes to offset rIP by in the return
2160 * address.
2161 * @param u8Vector The interrupt / exception vector number.
2162 * @param fFlags The flags.
2163 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2164 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2165 */
2166static VBOXSTRICTRC
2167iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2168 PCPUMCTX pCtx,
2169 uint8_t cbInstr,
2170 uint8_t u8Vector,
2171 uint32_t fFlags,
2172 uint16_t uErr,
2173 uint64_t uCr2)
2174{
2175 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2176 NOREF(uErr); NOREF(uCr2);
2177
2178 /*
2179 * Read the IDT entry.
2180 */
2181 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2182 {
2183 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2184 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2185 }
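    /* Each real-mode IVT entry is 4 bytes: a 16-bit offset at +0 followed by a
       16-bit segment at +2, hence the 4 * u8Vector indexing and the RTFAR16
       fetch below (e.g. vector 0x08 lives at IDTR.base + 0x20). */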
2186 RTFAR16 Idte;
2187 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2188 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2189 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2190 return rcStrict;
2191
2192 /*
2193 * Push the stack frame.
2194 */
2195 uint16_t *pu16Frame;
2196 uint64_t uNewRsp;
2197 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2198 if (rcStrict != VINF_SUCCESS)
2199 return rcStrict;
2200
2201 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2202 pu16Frame[2] = (uint16_t)fEfl;
2203 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2204 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2205 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2206 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2207 return rcStrict;
2208
2209 /*
2210 * Load the vector address into cs:ip and make exception specific state
2211 * adjustments.
2212 */
2213 pCtx->cs.Sel = Idte.sel;
2214 pCtx->cs.ValidSel = Idte.sel;
2215 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2216 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2217 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2218 pCtx->rip = Idte.off;
2219 fEfl &= ~X86_EFL_IF;
2220 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2221
2222 /** @todo do we actually do this in real mode? */
2223 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2224 iemRaiseXcptAdjustState(pCtx, u8Vector);
2225
2226 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2227}
2228
2229
2230/**
2231 * Implements exceptions and interrupts for protected mode.
2232 *
2233 * @returns VBox strict status code.
2234 * @param pIemCpu The IEM per CPU instance data.
2235 * @param pCtx The CPU context.
2236 * @param cbInstr The number of bytes to offset rIP by in the return
2237 * address.
2238 * @param u8Vector The interrupt / exception vector number.
2239 * @param fFlags The flags.
2240 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2241 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2242 */
2243static VBOXSTRICTRC
2244iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2245 PCPUMCTX pCtx,
2246 uint8_t cbInstr,
2247 uint8_t u8Vector,
2248 uint32_t fFlags,
2249 uint16_t uErr,
2250 uint64_t uCr2)
2251{
2252 NOREF(cbInstr);
2253
2254 /*
2255 * Read the IDT entry.
2256 */
2257 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2258 {
2259 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2260 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2261 }
2262 X86DESC Idte;
2263 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2264 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2265 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2266 return rcStrict;
2267 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2268 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2269 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2270
2271 /*
2272 * Check the descriptor type, DPL and such.
2273 * ASSUMES this is done in the same order as described for call-gate calls.
2274 */
2275 if (Idte.Gate.u1DescType)
2276 {
2277 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2278 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2279 }
2280 uint8_t f32BitGate = true;
2281 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2282 switch (Idte.Gate.u4Type)
2283 {
2284 case X86_SEL_TYPE_SYS_UNDEFINED:
2285 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2286 case X86_SEL_TYPE_SYS_LDT:
2287 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2288 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2289 case X86_SEL_TYPE_SYS_UNDEFINED2:
2290 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2291 case X86_SEL_TYPE_SYS_UNDEFINED3:
2292 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2293 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2294 case X86_SEL_TYPE_SYS_UNDEFINED4:
2295 {
2296 /** @todo check what actually happens when the type is wrong...
2297 * esp. call gates. */
2298 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2299 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2300 }
2301
2302 case X86_SEL_TYPE_SYS_286_INT_GATE:
2303 f32BitGate = false; /* fall thru */
2304 case X86_SEL_TYPE_SYS_386_INT_GATE:
2305 fEflToClear |= X86_EFL_IF;
2306 break;
2307
2308 case X86_SEL_TYPE_SYS_TASK_GATE:
2309 /** @todo task gates. */
2310 AssertFailedReturn(VERR_NOT_SUPPORTED);
2311
2312 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2313 f32BitGate = false; /* fall thru */
2314 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2315 break;
2316
2317 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2318 }
2319
2320 /* Check DPL against CPL if applicable. */
2321 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2322 {
2323 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2324 {
2325 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2326 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2327 }
2328 }
2329
2330 /* Is it there? */
2331 if (!Idte.Gate.u1Present)
2332 {
2333 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2334 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2335 }
2336
2337 /* A null CS is bad. */
2338 RTSEL NewCS = Idte.Gate.u16Sel;
2339 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2340 {
2341 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2342 return iemRaiseGeneralProtectionFault0(pIemCpu);
2343 }
2344
2345 /* Fetch the descriptor for the new CS. */
2346 IEMSELDESC DescCS;
2347 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
2348 if (rcStrict != VINF_SUCCESS)
2349 {
2350 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2351 return rcStrict;
2352 }
2353
2354 /* Must be a code segment. */
2355 if (!DescCS.Legacy.Gen.u1DescType)
2356 {
2357 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2358 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2359 }
2360 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2361 {
2362 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2363 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2364 }
2365
2366 /* Don't allow lowering the privilege level. */
2367 /** @todo Does the lowering of privileges apply to software interrupts
2368 * only? This has bearings on the more-privileged or
2369 * same-privilege stack behavior further down. A testcase would
2370 * be nice. */
2371 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2372 {
2373 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2374 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2375 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2376 }
2377
2378 /* Make sure the selector is present. */
2379 if (!DescCS.Legacy.Gen.u1Present)
2380 {
2381 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2382 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2383 }
2384
2385 /* Check the new EIP against the new CS limit. */
2386 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2387 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2388 ? Idte.Gate.u16OffsetLow
2389 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2390 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2391 if (uNewEip > cbLimitCS)
2392 {
2393 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
2394 u8Vector, uNewEip, cbLimitCS, NewCS));
2395 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2396 }
2397
2398 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2399 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2400 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2401
2402 /* From V8086 mode only go to CPL 0. */
2403 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
2404 {
2405 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
2406 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2407 }
2408
2409 /*
2410 * If the privilege level changes, we need to get a new stack from the TSS.
2411 * This in turns means validating the new SS and ESP...
2412 */
2413 if (uNewCpl != pIemCpu->uCpl)
2414 {
2415 RTSEL NewSS;
2416 uint32_t uNewEsp;
2417 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420
2421 IEMSELDESC DescSS;
2422 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2423 if (rcStrict != VINF_SUCCESS)
2424 return rcStrict;
2425
2426 /* Check that there is sufficient space for the stack frame. */
2427 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2428 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2429 {
2430 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2431 }
2432
2433 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
2434 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
2435 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
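        /* Frame size, as a worked example: without V86 the frame is
           {EIP, CS, EFLAGS, ESP, SS} plus an optional error code, i.e. 5 or 6
           entries; coming from V86 mode ES/DS/FS/GS are pushed as well, i.e. 9
           or 10 entries.  Each entry is 2 bytes for a 16-bit gate and 4 bytes
           for a 32-bit gate (the << f32BitGate). */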
2436 if ( uNewEsp - 1 > cbLimitSS
2437 || uNewEsp < cbStackFrame)
2438 {
2439 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2440 u8Vector, NewSS, uNewEsp, cbStackFrame));
2441 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2442 }
2443
2444 /*
2445 * Start making changes.
2446 */
2447
2448 /* Create the stack frame. */
2449 RTPTRUNION uStackFrame;
2450 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2451 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2452 if (rcStrict != VINF_SUCCESS)
2453 return rcStrict;
2454 void * const pvStackFrame = uStackFrame.pv;
2455 if (f32BitGate)
2456 {
2457 if (fFlags & IEM_XCPT_FLAGS_ERR)
2458 *uStackFrame.pu32++ = uErr;
2459 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
2460 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2461 uStackFrame.pu32[2] = fEfl;
2462 uStackFrame.pu32[3] = pCtx->esp;
2463 uStackFrame.pu32[4] = pCtx->ss.Sel;
2464 if (fEfl & X86_EFL_VM)
2465 {
2466 uStackFrame.pu32[1] = pCtx->cs.Sel;
2467 uStackFrame.pu32[5] = pCtx->es.Sel;
2468 uStackFrame.pu32[6] = pCtx->ds.Sel;
2469 uStackFrame.pu32[7] = pCtx->fs.Sel;
2470 uStackFrame.pu32[8] = pCtx->gs.Sel;
2471 }
2472 }
2473 else
2474 {
2475 if (fFlags & IEM_XCPT_FLAGS_ERR)
2476 *uStackFrame.pu16++ = uErr;
2477 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2478 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2479 uStackFrame.pu16[2] = fEfl;
2480 uStackFrame.pu16[3] = pCtx->sp;
2481 uStackFrame.pu16[4] = pCtx->ss.Sel;
2482 if (fEfl & X86_EFL_VM)
2483 {
2484 uStackFrame.pu16[1] = pCtx->cs.Sel;
2485 uStackFrame.pu16[5] = pCtx->es.Sel;
2486 uStackFrame.pu16[6] = pCtx->ds.Sel;
2487 uStackFrame.pu16[7] = pCtx->fs.Sel;
2488 uStackFrame.pu16[8] = pCtx->gs.Sel;
2489 }
2490 }
2491 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2492 if (rcStrict != VINF_SUCCESS)
2493 return rcStrict;
2494
2495 /* Mark the selectors 'accessed' (hope this is the correct time). */
2496 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2497 * after pushing the stack frame? (Write protect the gdt + stack to
2498 * find out.) */
2499 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2500 {
2501 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2502 if (rcStrict != VINF_SUCCESS)
2503 return rcStrict;
2504 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2505 }
2506
2507 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2508 {
2509 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2510 if (rcStrict != VINF_SUCCESS)
2511 return rcStrict;
2512 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2513 }
2514
2515 /*
2516 * Start committing the register changes (joins with the DPL=CPL branch).
2517 */
2518 pCtx->ss.Sel = NewSS;
2519 pCtx->ss.ValidSel = NewSS;
2520 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2521 pCtx->ss.u32Limit = cbLimitSS;
2522 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2523 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2524 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2525 pIemCpu->uCpl = uNewCpl;
2526
2527 if (fEfl & X86_EFL_VM)
2528 {
2529 iemHlpLoadNullDataSelectorProt(&pCtx->gs, 0);
2530 iemHlpLoadNullDataSelectorProt(&pCtx->fs, 0);
2531 iemHlpLoadNullDataSelectorProt(&pCtx->es, 0);
2532 iemHlpLoadNullDataSelectorProt(&pCtx->ds, 0);
2533 }
2534 }
2535 /*
2536 * Same privilege, no stack change and smaller stack frame.
2537 */
2538 else
2539 {
2540 uint64_t uNewRsp;
2541 RTPTRUNION uStackFrame;
2542 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
2543 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2544 if (rcStrict != VINF_SUCCESS)
2545 return rcStrict;
2546 void * const pvStackFrame = uStackFrame.pv;
2547
2548 if (f32BitGate)
2549 {
2550 if (fFlags & IEM_XCPT_FLAGS_ERR)
2551 *uStackFrame.pu32++ = uErr;
2552 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2553 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2554 uStackFrame.pu32[2] = fEfl;
2555 }
2556 else
2557 {
2558 if (fFlags & IEM_XCPT_FLAGS_ERR)
2559 *uStackFrame.pu16++ = uErr;
2560 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2561 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2562 uStackFrame.pu16[2] = fEfl;
2563 }
2564 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2565 if (rcStrict != VINF_SUCCESS)
2566 return rcStrict;
2567
2568 /* Mark the CS selector as 'accessed'. */
2569 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2570 {
2571 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2572 if (rcStrict != VINF_SUCCESS)
2573 return rcStrict;
2574 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2575 }
2576
2577 /*
2578 * Start committing the register changes (joins with the other branch).
2579 */
2580 pCtx->rsp = uNewRsp;
2581 }
2582
2583 /* ... register committing continues. */
2584 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2585 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2586 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2587 pCtx->cs.u32Limit = cbLimitCS;
2588 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2589 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2590
2591 pCtx->rip = uNewEip;
2592 fEfl &= ~fEflToClear;
2593 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2594
2595 if (fFlags & IEM_XCPT_FLAGS_CR2)
2596 pCtx->cr2 = uCr2;
2597
2598 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2599 iemRaiseXcptAdjustState(pCtx, u8Vector);
2600
2601 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2602}
2603
2604
2605/**
2606 * Implements exceptions and interrupts for long mode.
2607 *
2608 * @returns VBox strict status code.
2609 * @param pIemCpu The IEM per CPU instance data.
2610 * @param pCtx The CPU context.
2611 * @param cbInstr The number of bytes to offset rIP by in the return
2612 * address.
2613 * @param u8Vector The interrupt / exception vector number.
2614 * @param fFlags The flags.
2615 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2616 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2617 */
2618static VBOXSTRICTRC
2619iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2620 PCPUMCTX pCtx,
2621 uint8_t cbInstr,
2622 uint8_t u8Vector,
2623 uint32_t fFlags,
2624 uint16_t uErr,
2625 uint64_t uCr2)
2626{
2627 NOREF(cbInstr);
2628
2629 /*
2630 * Read the IDT entry.
2631 */
2632 uint16_t offIdt = (uint16_t)u8Vector << 4;
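    /* Long mode IDT entries are 16 bytes each, hence the << 4; e.g. vector 14
       (#PF) sits at IDTR.base + 0xE0 and is fetched below as two 8-byte reads. */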
2633 if (pCtx->idtr.cbIdt < offIdt + 7)
2634 {
2635 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2636 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2637 }
2638 X86DESC64 Idte;
2639 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
2640 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2641 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
2642 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2643 return rcStrict;
2644 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2645 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2646 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2647
2648 /*
2649 * Check the descriptor type, DPL and such.
2650 * ASSUMES this is done in the same order as described for call-gate calls.
2651 */
2652 if (Idte.Gate.u1DescType)
2653 {
2654 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2655 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2656 }
2657 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2658 switch (Idte.Gate.u4Type)
2659 {
2660 case AMD64_SEL_TYPE_SYS_INT_GATE:
2661 fEflToClear |= X86_EFL_IF;
2662 break;
2663 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2664 break;
2665
2666 default:
2667 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2668 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2669 }
2670
2671 /* Check DPL against CPL if applicable. */
2672 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2673 {
2674 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2675 {
2676 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2677 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2678 }
2679 }
2680
2681 /* Is it there? */
2682 if (!Idte.Gate.u1Present)
2683 {
2684 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2685 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2686 }
2687
2688 /* A null CS is bad. */
2689 RTSEL NewCS = Idte.Gate.u16Sel;
2690 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2691 {
2692 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2693 return iemRaiseGeneralProtectionFault0(pIemCpu);
2694 }
2695
2696 /* Fetch the descriptor for the new CS. */
2697 IEMSELDESC DescCS;
2698 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
2699 if (rcStrict != VINF_SUCCESS)
2700 {
2701 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704
2705 /* Must be a 64-bit code segment. */
2706 if (!DescCS.Long.Gen.u1DescType)
2707 {
2708 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2709 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2710 }
2711 if ( !DescCS.Long.Gen.u1Long
2712 || DescCS.Long.Gen.u1DefBig
2713 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2714 {
2715 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2716 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2717 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2718 }
2719
2720 /* Don't allow lowering the privilege level. For non-conforming CS
2721 selectors, the CS.DPL sets the privilege level the trap/interrupt
2722 handler runs at. For conforming CS selectors, the CPL remains
2723 unchanged, but the CS.DPL must be <= CPL. */
2724 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2725 * when CPU in Ring-0. Result \#GP? */
2726 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2727 {
2728 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2729 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2730 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2731 }
2732
2733
2734 /* Make sure the selector is present. */
2735 if (!DescCS.Legacy.Gen.u1Present)
2736 {
2737 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2738 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2739 }
2740
2741 /* Check that the new RIP is canonical. */
2742 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2743 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2744 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2745 if (!IEM_IS_CANONICAL(uNewRip))
2746 {
2747 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2748 return iemRaiseGeneralProtectionFault0(pIemCpu);
2749 }
2750
2751 /*
2752 * If the privilege level changes or if the IST isn't zero, we need to get
2753 * a new stack from the TSS.
2754 */
2755 uint64_t uNewRsp;
2756 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2757 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2758 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2759 if ( uNewCpl != pIemCpu->uCpl
2760 || Idte.Gate.u3IST != 0)
2761 {
2762 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2763 if (rcStrict != VINF_SUCCESS)
2764 return rcStrict;
2765 }
2766 else
2767 uNewRsp = pCtx->rsp;
2768 uNewRsp &= ~(uint64_t)0xf;
2769
2770 /*
2771 * Start making changes.
2772 */
2773
2774 /* Create the stack frame. */
2775 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
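    /* I.e. 40 bytes for {RIP, CS, RFLAGS, RSP, SS}, or 48 bytes when an error
       code is pushed as well. */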
2776 RTPTRUNION uStackFrame;
2777 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2778 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2779 if (rcStrict != VINF_SUCCESS)
2780 return rcStrict;
2781 void * const pvStackFrame = uStackFrame.pv;
2782
2783 if (fFlags & IEM_XCPT_FLAGS_ERR)
2784 *uStackFrame.pu64++ = uErr;
2785 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
2786 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
2787 uStackFrame.pu64[2] = fEfl;
2788 uStackFrame.pu64[3] = pCtx->rsp;
2789 uStackFrame.pu64[4] = pCtx->ss.Sel;
2790 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2791 if (rcStrict != VINF_SUCCESS)
2792 return rcStrict;
2793
2794 /* Mark the CS selector 'accessed' (hope this is the correct time). */
2795 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2796 * after pushing the stack frame? (Write protect the gdt + stack to
2797 * find out.) */
2798 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2799 {
2800 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2801 if (rcStrict != VINF_SUCCESS)
2802 return rcStrict;
2803 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2804 }
2805
2806 /*
2807 * Start committing the register changes.
2808 */
2809 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2810 * hidden registers when interrupting 32-bit or 16-bit code! */
2811 if (uNewCpl != pIemCpu->uCpl)
2812 {
2813 pCtx->ss.Sel = 0 | uNewCpl;
2814 pCtx->ss.ValidSel = 0 | uNewCpl;
2815 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2816 pCtx->ss.u32Limit = UINT32_MAX;
2817 pCtx->ss.u64Base = 0;
2818 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2819 }
2820 pCtx->rsp = uNewRsp - cbStackFrame;
2821 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2822 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2823 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2824 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2825 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2826 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2827 pCtx->rip = uNewRip;
2828 pIemCpu->uCpl = uNewCpl;
2829
2830 fEfl &= ~fEflToClear;
2831 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2832
2833 if (fFlags & IEM_XCPT_FLAGS_CR2)
2834 pCtx->cr2 = uCr2;
2835
2836 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2837 iemRaiseXcptAdjustState(pCtx, u8Vector);
2838
2839 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2840}
2841
2842
2843/**
2844 * Implements exceptions and interrupts.
2845 *
2846 * All exceptions and interrupts go through this function!
2847 *
2848 * @returns VBox strict status code.
2849 * @param pIemCpu The IEM per CPU instance data.
2850 * @param cbInstr The number of bytes to offset rIP by in the return
2851 * address.
2852 * @param u8Vector The interrupt / exception vector number.
2853 * @param fFlags The flags.
2854 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2855 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2856 */
2857DECL_NO_INLINE(static, VBOXSTRICTRC)
2858iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2859 uint8_t cbInstr,
2860 uint8_t u8Vector,
2861 uint32_t fFlags,
2862 uint16_t uErr,
2863 uint64_t uCr2)
2864{
2865 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2866
2867 /*
2868 * Perform the V8086 IOPL check and, if it fails, upgrade the fault to a #GP(0) without nesting.
2869 */
2870 if ( pCtx->eflags.Bits.u1VM
2871 && pCtx->eflags.Bits.u2IOPL != 3
2872 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2873 && (pCtx->cr0 & X86_CR0_PE) )
2874 {
2875 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
2876 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2877 u8Vector = X86_XCPT_GP;
2878 uErr = 0;
2879 }
2880#ifdef DBGFTRACE_ENABLED
2881 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
2882 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
2883 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
2884#endif
2885
2886 /*
2887 * Do recursion accounting.
2888 */
2889 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2890 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2891 if (pIemCpu->cXcptRecursions == 0)
2892 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2893 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2894 else
2895 {
2896 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2897 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2898
2899 /** @todo double and triple faults. */
2900 if (pIemCpu->cXcptRecursions >= 3)
2901 {
2902#ifdef DEBUG_bird
2903 AssertFailed();
2904#endif
2905 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2906 }
2907
2908 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2909 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2910 {
2911 ....
2912 } */
2913 }
2914 pIemCpu->cXcptRecursions++;
2915 pIemCpu->uCurXcpt = u8Vector;
2916 pIemCpu->fCurXcpt = fFlags;
2917
2918 /*
2919 * Extensive logging.
2920 */
2921#if defined(LOG_ENABLED) && defined(IN_RING3)
2922 if (LogIs3Enabled())
2923 {
2924 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2925 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2926 char szRegs[4096];
2927 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2928 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2929 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2930 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2931 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2932 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2933 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2934 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2935 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2936 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2937 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2938 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2939 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2940 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2941 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2942 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2943 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2944 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2945 " efer=%016VR{efer}\n"
2946 " pat=%016VR{pat}\n"
2947 " sf_mask=%016VR{sf_mask}\n"
2948 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2949 " lstar=%016VR{lstar}\n"
2950 " star=%016VR{star} cstar=%016VR{cstar}\n"
2951 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2952 );
2953
2954 char szInstr[256];
2955 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2956 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2957 szInstr, sizeof(szInstr), NULL);
2958 Log3(("%s%s\n", szRegs, szInstr));
2959 }
2960#endif /* LOG_ENABLED */
2961
2962 /*
2963 * Call the mode specific worker function.
2964 */
2965 VBOXSTRICTRC rcStrict;
2966 if (!(pCtx->cr0 & X86_CR0_PE))
2967 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2968 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2969 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2970 else
2971 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2972
2973 /*
2974 * Unwind.
2975 */
2976 pIemCpu->cXcptRecursions--;
2977 pIemCpu->uCurXcpt = uPrevXcpt;
2978 pIemCpu->fCurXcpt = fPrevXcpt;
2979 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2980 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2981 return rcStrict;
2982}
2983
2984
2985/** \#DE - 00. */
2986DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2987{
2988 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2989}
2990
2991
2992/** \#DB - 01. */
2993DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2994{
2995 /** @todo set/clear RF. */
2996 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2997}
2998
2999
3000/** \#UD - 06. */
3001DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
3002{
3003 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3004}
3005
3006
3007/** \#NM - 07. */
3008DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
3009{
3010 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3011}
3012
3013
3014#ifdef SOME_UNUSED_FUNCTION
3015/** \#TS(err) - 0a. */
3016DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
3017{
3018 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3019}
3020#endif
3021
3022
3023/** \#TS(tr) - 0a. */
3024DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
3025{
3026 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3027 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
3028}
3029
3030
3031/** \#TS(0) - 0a. */
3032DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
3033{
3034 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3035 0, 0);
3036}
3037
3038
3039/** \#TS(sel) - 0a. */
3040DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
3041{
3042 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3043 uSel & X86_SEL_MASK_OFF_RPL, 0);
3044}
3045
3046
3047/** \#NP(err) - 0b. */
3048DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
3049{
3050 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3051}
3052
3053
3054/** \#NP(seg) - 0b. */
3055DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
3056{
3057 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3058 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
3059}
3060
3061
3062/** \#NP(sel) - 0b. */
3063DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
3064{
3065 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3066 uSel & ~X86_SEL_RPL, 0);
3067}
3068
3069
3070/** \#SS(seg) - 0c. */
3071DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
3072{
3073 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3074 uSel & ~X86_SEL_RPL, 0);
3075}
3076
3077
3078/** \#GP(n) - 0d. */
3079DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
3080{
3081 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3082}
3083
3084
3085/** \#GP(0) - 0d. */
3086DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
3087{
3088 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3089}
3090
3091
3092/** \#GP(sel) - 0d. */
3093DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
3094{
3095 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3096 Sel & ~X86_SEL_RPL, 0);
3097}
3098
3099
3100/** \#GP(0) - 0d. */
3101DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
3102{
3103 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3104}
3105
3106
3107/** \#GP(sel) - 0d. */
3108DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
3109{
3110 NOREF(fAccess);
3111 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
3112 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3113}
3114
3115
3116/** \#GP(sel) - 0d. */
3117DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
3118{
3119 NOREF(Sel);
3120 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3121}
3122
3123
3124/** \#GP(sel) - 0d. */
3125DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
3126{
3127 NOREF(iSegReg); NOREF(fAccess);
3128 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3129}
3130
3131
3132/** \#PF(n) - 0e. */
3133DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
3134{
3135 uint16_t uErr;
3136 switch (rc)
3137 {
3138 case VERR_PAGE_NOT_PRESENT:
3139 case VERR_PAGE_TABLE_NOT_PRESENT:
3140 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
3141 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
3142 uErr = 0;
3143 break;
3144
3145 default:
3146 AssertMsgFailed(("%Rrc\n", rc));
3147 case VERR_ACCESS_DENIED:
3148 uErr = X86_TRAP_PF_P;
3149 break;
3150
3151 /** @todo reserved */
3152 }
3153
3154 if (pIemCpu->uCpl == 3)
3155 uErr |= X86_TRAP_PF_US;
3156
3157 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
3158 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
3159 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
3160 uErr |= X86_TRAP_PF_ID;
3161
3162 /* Note! RW access callers reporting a WRITE protection fault will clear
3163 the READ flag before calling. Thus read-modify-write accesses (RW)
3164 can safely be reported as READ faults. */
3165 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
3166 uErr |= X86_TRAP_PF_RW;
3167
3168 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
3169 uErr, GCPtrWhere);
3170}
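/* For illustration, the error code produced above for a few common cases
   (using the X86_TRAP_PF_* bit values): a ring-3 write to a not-present page
   gives X86_TRAP_PF_US | X86_TRAP_PF_RW (0x06); a ring-0 read hitting a
   protection fault gives X86_TRAP_PF_P (0x01); a ring-3 instruction fetch from
   a no-execute page (PAE + EFER.NXE) gives X86_TRAP_PF_P | X86_TRAP_PF_US |
   X86_TRAP_PF_ID (0x15). */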
3171
3172
3173/** \#MF(0) - 10. */
3174DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
3175{
3176 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3177}
3178
3179
3180/** \#AC(0) - 11. */
3181DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
3182{
3183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3184}
3185
3186
3187/**
3188 * Macro for calling iemCImplRaiseDivideError().
3189 *
3190 * This enables us to add/remove arguments and force different levels of
3191 * inlining as we wish.
3192 *
3193 * @return Strict VBox status code.
3194 */
3195#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
3196IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
3197{
3198 NOREF(cbInstr);
3199 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3200}
3201
3202
3203/**
3204 * Macro for calling iemCImplRaiseInvalidLockPrefix().
3205 *
3206 * This enables us to add/remove arguments and force different levels of
3207 * inlining as we wish.
3208 *
3209 * @return Strict VBox status code.
3210 */
3211#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
3212IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3213{
3214 NOREF(cbInstr);
3215 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3216}
3217
3218
3219/**
3220 * Macro for calling iemCImplRaiseInvalidOpcode().
3221 *
3222 * This enables us to add/remove arguments and force different levels of
3223 * inlining as we wish.
3224 *
3225 * @return Strict VBox status code.
3226 */
3227#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
3228IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3229{
3230 NOREF(cbInstr);
3231 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3232}
3233
3234
3235/** @} */
3236
3237
3238/*
3239 *
3240 * Helper routines.
3241 * Helper routines.
3242 * Helper routines.
3243 *
3244 */
3245
3246/**
3247 * Recalculates the effective operand size.
3248 *
3249 * @param pIemCpu The IEM state.
3250 */
3251static void iemRecalEffOpSize(PIEMCPU pIemCpu)
3252{
3253 switch (pIemCpu->enmCpuMode)
3254 {
3255 case IEMMODE_16BIT:
3256 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
3257 break;
3258 case IEMMODE_32BIT:
3259 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
3260 break;
3261 case IEMMODE_64BIT:
3262 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
3263 {
3264 case 0:
3265 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
3266 break;
3267 case IEM_OP_PRF_SIZE_OP:
3268 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3269 break;
3270 case IEM_OP_PRF_SIZE_REX_W:
3271 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
3272 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3273 break;
3274 }
3275 break;
3276 default:
3277 AssertFailed();
3278 }
3279}
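/* Worked example: in 32-bit mode a 0x66 operand-size prefix (IEM_OP_PRF_SIZE_OP)
   yields 16-bit operands, while in 64-bit mode REX.W yields 64-bit operands and,
   per the switch above, wins over a 0x66 prefix when both are present. */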
3280
3281
3282/**
3283 * Sets the default operand size to 64-bit and recalculates the effective
3284 * operand size.
3285 *
3286 * @param pIemCpu The IEM state.
3287 */
3288static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
3289{
3290 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3291 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
3292 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
3293 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3294 else
3295 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3296}
3297
3298
3299/*
3300 *
3301 * Common opcode decoders.
3302 * Common opcode decoders.
3303 * Common opcode decoders.
3304 *
3305 */
3306//#include <iprt/mem.h>
3307
3308/**
3309 * Used to add extra details about a stub case.
3310 * @param pIemCpu The IEM per CPU state.
3311 */
3312static void iemOpStubMsg2(PIEMCPU pIemCpu)
3313{
3314#if defined(LOG_ENABLED) && defined(IN_RING3)
3315 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3316 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3317 char szRegs[4096];
3318 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3319 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3320 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3321 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3322 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3323 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3324 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3325 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3326 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3327 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3328 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3329 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3330 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3331 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3332 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3333 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3334 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3335 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3336 " efer=%016VR{efer}\n"
3337 " pat=%016VR{pat}\n"
3338 " sf_mask=%016VR{sf_mask}\n"
3339 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3340 " lstar=%016VR{lstar}\n"
3341 " star=%016VR{star} cstar=%016VR{cstar}\n"
3342 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3343 );
3344
3345 char szInstr[256];
3346 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3347 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3348 szInstr, sizeof(szInstr), NULL);
3349
3350 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
3351#else
3352 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
3353#endif
3354}
3355
3356/**
3357 * Complains about a stub.
3358 *
3359 * There are two versions of this macro: one for daily use and one for use
3360 * when working on IEM.
3361 */
3362#if 0
3363# define IEMOP_BITCH_ABOUT_STUB() \
3364 do { \
3365 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
3366 iemOpStubMsg2(pIemCpu); \
3367 RTAssertPanic(); \
3368 } while (0)
3369#else
3370# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
3371#endif
3372
3373/** Stubs an opcode. */
3374#define FNIEMOP_STUB(a_Name) \
3375 FNIEMOP_DEF(a_Name) \
3376 { \
3377 IEMOP_BITCH_ABOUT_STUB(); \
3378 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3379 } \
3380 typedef int ignore_semicolon
3381
3382/** Stubs an opcode. */
3383#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
3384 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3385 { \
3386 IEMOP_BITCH_ABOUT_STUB(); \
3387 NOREF(a_Name0); \
3388 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3389 } \
3390 typedef int ignore_semicolon
3391
3392/** Stubs an opcode which currently should raise \#UD. */
3393#define FNIEMOP_UD_STUB(a_Name) \
3394 FNIEMOP_DEF(a_Name) \
3395 { \
3396 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3397 return IEMOP_RAISE_INVALID_OPCODE(); \
3398 } \
3399 typedef int ignore_semicolon
3400
3401/** Stubs an opcode which currently should raise \#UD. */
3402#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
3403 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3404 { \
3405 NOREF(a_Name0); \
3406 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3407 return IEMOP_RAISE_INVALID_OPCODE(); \
3408 } \
3409 typedef int ignore_semicolon
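/* Usage sketch, with a made-up handler name purely to show the shape of an
   invocation:
       FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);
   This expands to a handler that complains via IEMOP_BITCH_ABOUT_STUB and
   returns VERR_IEM_INSTR_NOT_IMPLEMENTED. */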
3410
3411
3412
3413/** @name Register Access.
3414 * @{
3415 */
3416
3417/**
3418 * Gets a reference (pointer) to the specified hidden segment register.
3419 *
3420 * @returns Hidden register reference.
3421 * @param pIemCpu The per CPU data.
3422 * @param iSegReg The segment register.
3423 */
3424static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
3425{
3426 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3427 PCPUMSELREG pSReg;
3428 switch (iSegReg)
3429 {
3430 case X86_SREG_ES: pSReg = &pCtx->es; break;
3431 case X86_SREG_CS: pSReg = &pCtx->cs; break;
3432 case X86_SREG_SS: pSReg = &pCtx->ss; break;
3433 case X86_SREG_DS: pSReg = &pCtx->ds; break;
3434 case X86_SREG_FS: pSReg = &pCtx->fs; break;
3435 case X86_SREG_GS: pSReg = &pCtx->gs; break;
3436 default:
3437 AssertFailedReturn(NULL);
3438 }
3439#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3440 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
3441 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
3442#else
3443 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
3444#endif
3445 return pSReg;
3446}
3447
3448
3449/**
3450 * Gets a reference (pointer) to the specified segment register (the selector
3451 * value).
3452 *
3453 * @returns Pointer to the selector variable.
3454 * @param pIemCpu The per CPU data.
3455 * @param iSegReg The segment register.
3456 */
3457static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
3458{
3459 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3460 switch (iSegReg)
3461 {
3462 case X86_SREG_ES: return &pCtx->es.Sel;
3463 case X86_SREG_CS: return &pCtx->cs.Sel;
3464 case X86_SREG_SS: return &pCtx->ss.Sel;
3465 case X86_SREG_DS: return &pCtx->ds.Sel;
3466 case X86_SREG_FS: return &pCtx->fs.Sel;
3467 case X86_SREG_GS: return &pCtx->gs.Sel;
3468 }
3469 AssertFailedReturn(NULL);
3470}
3471
3472
3473/**
3474 * Fetches the selector value of a segment register.
3475 *
3476 * @returns The selector value.
3477 * @param pIemCpu The per CPU data.
3478 * @param iSegReg The segment register.
3479 */
3480static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3481{
3482 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3483 switch (iSegReg)
3484 {
3485 case X86_SREG_ES: return pCtx->es.Sel;
3486 case X86_SREG_CS: return pCtx->cs.Sel;
3487 case X86_SREG_SS: return pCtx->ss.Sel;
3488 case X86_SREG_DS: return pCtx->ds.Sel;
3489 case X86_SREG_FS: return pCtx->fs.Sel;
3490 case X86_SREG_GS: return pCtx->gs.Sel;
3491 }
3492 AssertFailedReturn(0xffff);
3493}
3494
3495
3496/**
3497 * Gets a reference (pointer) to the specified general register.
3498 *
3499 * @returns Register reference.
3500 * @param pIemCpu The per CPU data.
3501 * @param iReg The general register.
3502 */
3503static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3504{
3505 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3506 switch (iReg)
3507 {
3508 case X86_GREG_xAX: return &pCtx->rax;
3509 case X86_GREG_xCX: return &pCtx->rcx;
3510 case X86_GREG_xDX: return &pCtx->rdx;
3511 case X86_GREG_xBX: return &pCtx->rbx;
3512 case X86_GREG_xSP: return &pCtx->rsp;
3513 case X86_GREG_xBP: return &pCtx->rbp;
3514 case X86_GREG_xSI: return &pCtx->rsi;
3515 case X86_GREG_xDI: return &pCtx->rdi;
3516 case X86_GREG_x8: return &pCtx->r8;
3517 case X86_GREG_x9: return &pCtx->r9;
3518 case X86_GREG_x10: return &pCtx->r10;
3519 case X86_GREG_x11: return &pCtx->r11;
3520 case X86_GREG_x12: return &pCtx->r12;
3521 case X86_GREG_x13: return &pCtx->r13;
3522 case X86_GREG_x14: return &pCtx->r14;
3523 case X86_GREG_x15: return &pCtx->r15;
3524 }
3525 AssertFailedReturn(NULL);
3526}
3527
3528
3529/**
3530 * Gets a reference (pointer) to the specified 8-bit general register.
3531 *
3532 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3533 *
3534 * @returns Register reference.
3535 * @param pIemCpu The per CPU data.
3536 * @param iReg The register.
3537 */
3538static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3539{
3540 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3541 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3542
3543 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3544 if (iReg >= 4)
3545 pu8Reg++;
3546 return pu8Reg;
3547}
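/* Example of the high-byte quirk handled above: with no REX prefix, iReg=4
   refers to AH, i.e. byte 1 of the rAX reference (iReg & 3 == 0, then the +1);
   with any REX prefix present the same encoding refers to SPL, i.e. byte 0 of
   rSP. */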
3548
3549
3550/**
3551 * Fetches the value of an 8-bit general register.
3552 *
3553 * @returns The register value.
3554 * @param pIemCpu The per CPU data.
3555 * @param iReg The register.
3556 */
3557static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3558{
3559 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3560 return *pbSrc;
3561}
3562
3563
3564/**
3565 * Fetches the value of a 16-bit general register.
3566 *
3567 * @returns The register value.
3568 * @param pIemCpu The per CPU data.
3569 * @param iReg The register.
3570 */
3571static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3572{
3573 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3574}
3575
3576
3577/**
3578 * Fetches the value of a 32-bit general register.
3579 *
3580 * @returns The register value.
3581 * @param pIemCpu The per CPU data.
3582 * @param iReg The register.
3583 */
3584static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3585{
3586 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3587}
3588
3589
3590/**
3591 * Fetches the value of a 64-bit general register.
3592 *
3593 * @returns The register value.
3594 * @param pIemCpu The per CPU data.
3595 * @param iReg The register.
3596 */
3597static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3598{
3599 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3600}
3601
3602
3603/**
3604 * Checks whether the FPU state is in FXSAVE format.
3605 *
3606 * @returns true if it is, false if it's in FNSAVE format.
3607 * @param pIemCpu The IEM per CPU data.
3608 */
3609DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3610{
3611#ifdef RT_ARCH_AMD64
3612 NOREF(pIemCpu);
3613 return true;
3614#else
3615 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3616 return true;
3617#endif
3618}
3619
3620
3621/**
3622 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3623 *
3624 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3625 * segment limit.
3626 *
3627 * @param pIemCpu The per CPU data.
3628 * @param offNextInstr The offset of the next instruction.
3629 */
3630static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3631{
3632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3633 switch (pIemCpu->enmEffOpSize)
3634 {
3635 case IEMMODE_16BIT:
3636 {
3637 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3638 if ( uNewIp > pCtx->cs.u32Limit
3639 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3640 return iemRaiseGeneralProtectionFault0(pIemCpu);
3641 pCtx->rip = uNewIp;
3642 break;
3643 }
3644
3645 case IEMMODE_32BIT:
3646 {
3647 Assert(pCtx->rip <= UINT32_MAX);
3648 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3649
3650 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3651 if (uNewEip > pCtx->cs.u32Limit)
3652 return iemRaiseGeneralProtectionFault0(pIemCpu);
3653 pCtx->rip = uNewEip;
3654 break;
3655 }
3656
3657 case IEMMODE_64BIT:
3658 {
3659 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3660
3661 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3662 if (!IEM_IS_CANONICAL(uNewRip))
3663 return iemRaiseGeneralProtectionFault0(pIemCpu);
3664 pCtx->rip = uNewRip;
3665 break;
3666 }
3667
3668 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3669 }
3670
3671 return VINF_SUCCESS;
3672}
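/* Worked example (16-bit case): a two byte 'jmp short $' at IP=0x1000 has
   offNextInstr=-2 and offOpcode=2, so uNewIp = 0x1000 - 2 + 2 = 0x1000, i.e. a
   jump to itself; offOpcode is the number of opcode bytes fetched for the
   current instruction, making the offset relative to the next instruction. */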
3673
3674
3675/**
3676 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3677 *
3678 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3679 * segment limit.
3680 *
3681 * @returns Strict VBox status code.
3682 * @param pIemCpu The per CPU data.
3683 * @param offNextInstr The offset of the next instruction.
3684 */
3685static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3686{
3687 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3688 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3689
3690 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3691 if ( uNewIp > pCtx->cs.u32Limit
3692 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3693 return iemRaiseGeneralProtectionFault0(pIemCpu);
3694 /** @todo Test 16-bit jump in 64-bit mode. */
3695 pCtx->rip = uNewIp;
3696
3697 return VINF_SUCCESS;
3698}
3699
3700
3701/**
3702 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3703 *
3704 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3705 * segment limit.
3706 *
3707 * @returns Strict VBox status code.
3708 * @param pIemCpu The per CPU data.
3709 * @param offNextInstr The offset of the next instruction.
3710 */
3711static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3712{
3713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3714 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3715
3716 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3717 {
3718 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3719
3720 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3721 if (uNewEip > pCtx->cs.u32Limit)
3722 return iemRaiseGeneralProtectionFault0(pIemCpu);
3723 pCtx->rip = uNewEip;
3724 }
3725 else
3726 {
3727 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3728
3729 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3730 if (!IEM_IS_CANONICAL(uNewRip))
3731 return iemRaiseGeneralProtectionFault0(pIemCpu);
3732 pCtx->rip = uNewRip;
3733 }
3734 return VINF_SUCCESS;
3735}
3736
3737
3738/**
3739 * Performs a near jump to the specified address.
3740 *
3741 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3742 * segment limit.
3743 *
3744 * @param pIemCpu The per CPU data.
3745 * @param uNewRip The new RIP value.
3746 */
3747static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3748{
3749 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3750 switch (pIemCpu->enmEffOpSize)
3751 {
3752 case IEMMODE_16BIT:
3753 {
3754 Assert(uNewRip <= UINT16_MAX);
3755 if ( uNewRip > pCtx->cs.u32Limit
3756 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3757 return iemRaiseGeneralProtectionFault0(pIemCpu);
3758 /** @todo Test 16-bit jump in 64-bit mode. */
3759 pCtx->rip = uNewRip;
3760 break;
3761 }
3762
3763 case IEMMODE_32BIT:
3764 {
3765 Assert(uNewRip <= UINT32_MAX);
3766 Assert(pCtx->rip <= UINT32_MAX);
3767 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3768
3769 if (uNewRip > pCtx->cs.u32Limit)
3770 return iemRaiseGeneralProtectionFault0(pIemCpu);
3771 pCtx->rip = uNewRip;
3772 break;
3773 }
3774
3775 case IEMMODE_64BIT:
3776 {
3777 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3778
3779 if (!IEM_IS_CANONICAL(uNewRip))
3780 return iemRaiseGeneralProtectionFault0(pIemCpu);
3781 pCtx->rip = uNewRip;
3782 break;
3783 }
3784
3785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3786 }
3787
3788 return VINF_SUCCESS;
3789}
3790
3791
3792/**
3793 * Get the address of the top of the stack.
3794 *
3795 * @param pIemCpu The per CPU data.
3796 * @param pCtx The CPU context which SP/ESP/RSP should be
3797 * read.
3798 */
3799DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
3800{
3801 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3802 return pCtx->rsp;
3803 if (pCtx->ss.Attr.n.u1DefBig)
3804 return pCtx->esp;
3805 return pCtx->sp;
3806}
3807
3808
3809/**
3810 * Updates the RIP/EIP/IP to point to the next instruction.
3811 *
3812 * @param pIemCpu The per CPU data.
3813 * @param cbInstr The number of bytes to add.
3814 */
3815static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3816{
3817 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3818 switch (pIemCpu->enmCpuMode)
3819 {
3820 case IEMMODE_16BIT:
3821 Assert(pCtx->rip <= UINT16_MAX);
3822 pCtx->eip += cbInstr;
3823 pCtx->eip &= UINT32_C(0xffff);
3824 break;
3825
3826 case IEMMODE_32BIT:
3827 pCtx->eip += cbInstr;
3828 Assert(pCtx->rip <= UINT32_MAX);
3829 break;
3830
3831 case IEMMODE_64BIT:
3832 pCtx->rip += cbInstr;
3833 break;
3834 default: AssertFailed();
3835 }
3836}
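/* Note that the 16-bit case masks EIP back to 16 bits, so IP wraps instead of
   spilling into the high word: e.g. IP=0xfffe plus a 3 byte instruction gives
   (0xfffe + 3) & 0xffff = 0x0001. */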
3837
3838
3839/**
3840 * Updates the RIP/EIP/IP to point to the next instruction.
3841 *
3842 * @param pIemCpu The per CPU data.
3843 */
3844static void iemRegUpdateRip(PIEMCPU pIemCpu)
3845{
3846 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3847}
3848
3849
3850/**
3851 * Adds to the stack pointer.
3852 *
3853 * @param pIemCpu The per CPU data.
3854 * @param pCtx The CPU context which SP/ESP/RSP should be
3855 * updated.
3856 * @param cbToAdd The number of bytes to add.
3857 */
3858DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
3859{
3860 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3861 pCtx->rsp += cbToAdd;
3862 else if (pCtx->ss.Attr.n.u1DefBig)
3863 pCtx->esp += cbToAdd;
3864 else
3865 pCtx->sp += cbToAdd;
3866}
3867
3868
3869/**
3870 * Subtracts from the stack pointer.
3871 *
3872 * @param pIemCpu The per CPU data.
3873 * @param pCtx The CPU context which SP/ESP/RSP should be
3874 * updated.
3875 * @param cbToSub The number of bytes to subtract.
3876 */
3877DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
3878{
3879 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3880 pCtx->rsp -= cbToSub;
3881 else if (pCtx->ss.Attr.n.u1DefBig)
3882 pCtx->esp -= cbToSub;
3883 else
3884 pCtx->sp -= cbToSub;
3885}
3886
3887
3888/**
3889 * Adds to the temporary stack pointer.
3890 *
3891 * @param pIemCpu The per CPU data.
3892 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3893 * @param cbToAdd The number of bytes to add.
3894 * @param pCtx Where to get the current stack mode.
3895 */
3896DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
3897{
3898 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3899 pTmpRsp->u += cbToAdd;
3900 else if (pCtx->ss.Attr.n.u1DefBig)
3901 pTmpRsp->DWords.dw0 += cbToAdd;
3902 else
3903 pTmpRsp->Words.w0 += cbToAdd;
3904}
3905
3906
3907/**
3908 * Subtracts from the temporary stack pointer.
3909 *
3910 * @param pIemCpu The per CPU data.
3911 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3912 * @param cbToSub The number of bytes to subtract.
3913 * @param pCtx Where to get the current stack mode.
3914 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3915 * expecting that.
3916 */
3917DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
3918{
3919 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3920 pTmpRsp->u -= cbToSub;
3921 else if (pCtx->ss.Attr.n.u1DefBig)
3922 pTmpRsp->DWords.dw0 -= cbToSub;
3923 else
3924 pTmpRsp->Words.w0 -= cbToSub;
3925}
3926
3927
3928/**
3929 * Calculates the effective stack address for a push of the specified size as
3930 * well as the new RSP value (upper bits may be masked).
3931 *
3932 * @returns Effective stack address for the push.
3933 * @param pIemCpu The IEM per CPU data.
3934 * @param pCtx Where to get the current stack mode.
3935 * @param cbItem The size of the stack item to push.
3936 * @param puNewRsp Where to return the new RSP value.
3937 */
3938DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3939{
3940 RTUINT64U uTmpRsp;
3941 RTGCPTR GCPtrTop;
3942 uTmpRsp.u = pCtx->rsp;
3943
3944 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3945 GCPtrTop = uTmpRsp.u -= cbItem;
3946 else if (pCtx->ss.Attr.n.u1DefBig)
3947 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3948 else
3949 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3950 *puNewRsp = uTmpRsp.u;
3951 return GCPtrTop;
3952}
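/* Worked example: with a 16-bit stack (SS.D=0), SP=0x0002 and a 4 byte push,
   only the low word is decremented: the function returns GCPtrTop=0xfffe and
   *puNewRsp keeps the upper 48 bits of the original RSP unchanged. */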
3953
3954
3955/**
3956 * Gets the current stack pointer and calculates the value after a pop of the
3957 * specified size.
3958 *
3959 * @returns Current stack pointer.
3960 * @param pIemCpu The per CPU data.
3961 * @param pCtx Where to get the current stack mode.
3962 * @param cbItem The size of the stack item to pop.
3963 * @param puNewRsp Where to return the new RSP value.
3964 */
3965DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3966{
3967 RTUINT64U uTmpRsp;
3968 RTGCPTR GCPtrTop;
3969 uTmpRsp.u = pCtx->rsp;
3970
3971 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3972 {
3973 GCPtrTop = uTmpRsp.u;
3974 uTmpRsp.u += cbItem;
3975 }
3976 else if (pCtx->ss.Attr.n.u1DefBig)
3977 {
3978 GCPtrTop = uTmpRsp.DWords.dw0;
3979 uTmpRsp.DWords.dw0 += cbItem;
3980 }
3981 else
3982 {
3983 GCPtrTop = uTmpRsp.Words.w0;
3984 uTmpRsp.Words.w0 += cbItem;
3985 }
3986 *puNewRsp = uTmpRsp.u;
3987 return GCPtrTop;
3988}
3989
3990
3991/**
3992 * Calculates the effective stack address for a push of the specified size as
3993 * well as the new temporary RSP value (upper bits may be masked).
3994 *
3995 * @returns Effective stack address for the push.
3996 * @param pIemCpu The per CPU data.
3997 * @param pCtx Where to get the current stack mode.
3998 * @param pTmpRsp The temporary stack pointer. This is updated.
3999 * @param cbItem The size of the stack item to push.
4000 */
4001DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
4002{
4003 RTGCPTR GCPtrTop;
4004
4005 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4006 GCPtrTop = pTmpRsp->u -= cbItem;
4007 else if (pCtx->ss.Attr.n.u1DefBig)
4008 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
4009 else
4010 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
4011 return GCPtrTop;
4012}
4013
4014
4015/**
4016 * Gets the effective stack address for a pop of the specified size and
4017 * calculates and updates the temporary RSP.
4018 *
4019 * @returns Current stack pointer.
4020 * @param pIemCpu The per CPU data.
4021 * @param pTmpRsp The temporary stack pointer. This is updated.
4022 * @param pCtx Where to get the current stack mode.
4023 * @param cbItem The size of the stack item to pop.
4024 */
4025DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
4026{
4027 RTGCPTR GCPtrTop;
4028 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4029 {
4030 GCPtrTop = pTmpRsp->u;
4031 pTmpRsp->u += cbItem;
4032 }
4033 else if (pCtx->ss.Attr.n.u1DefBig)
4034 {
4035 GCPtrTop = pTmpRsp->DWords.dw0;
4036 pTmpRsp->DWords.dw0 += cbItem;
4037 }
4038 else
4039 {
4040 GCPtrTop = pTmpRsp->Words.w0;
4041 pTmpRsp->Words.w0 += cbItem;
4042 }
4043 return GCPtrTop;
4044}
4045
4046
4047/**
4048 * Checks if an Intel CPUID feature bit is set.
4049 *
4050 * @returns true / false.
4051 *
4052 * @param pIemCpu The IEM per CPU data.
4053 * @param fEdx The EDX bit to test, or 0 if ECX.
4054 * @param fEcx The ECX bit to test, or 0 if EDX.
4055 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
4056 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
4057 */
4058static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
4059{
4060 uint32_t uEax, uEbx, uEcx, uEdx;
4061 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
4062 return (fEcx && (uEcx & fEcx))
4063 || (fEdx && (uEdx & fEdx));
4064}
4065
4066
4067/**
4068 * Checks if an AMD CPUID feature bit is set.
4069 *
4070 * @returns true / false.
4071 *
4072 * @param pIemCpu The IEM per CPU data.
4073 * @param fEdx The EDX bit to test, or 0 if ECX.
4074 * @param fEcx The ECX bit to test, or 0 if EDX.
4075 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
4076 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
4077 */
4078static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
4079{
4080 uint32_t uEax, uEbx, uEcx, uEdx;
4081 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
4082 return (fEcx && (uEcx & fEcx))
4083 || (fEdx && (uEdx & fEdx));
4084}
4085
4086/** @} */
4087
4088
4089/** @name FPU access and helpers.
4090 *
4091 * @{
4092 */
4093
4094
4095/**
4096 * Hook for preparing to use the host FPU.
4097 *
4098 * This is necessary in ring-0 and raw-mode context.
4099 *
4100 * @param pIemCpu The IEM per CPU data.
4101 */
4102DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
4103{
4104#ifdef IN_RING3
4105 NOREF(pIemCpu);
4106#else
4107/** @todo RZ: FIXME */
4108//# error "Implement me"
4109#endif
4110}
4111
4112
4113/**
4114 * Hook for preparing to use the host FPU for SSE instructions.
4115 *
4116 * This is necessary in ring-0 and raw-mode context.
4117 *
4118 * @param pIemCpu The IEM per CPU data.
4119 */
4120DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
4121{
4122 iemFpuPrepareUsage(pIemCpu);
4123}
4124
4125
4126/**
4127 * Stores a QNaN value into a FPU register.
4128 *
4129 * @param pReg Pointer to the register.
4130 */
4131DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
4132{
4133 pReg->au32[0] = UINT32_C(0x00000000);
4134 pReg->au32[1] = UINT32_C(0xc0000000);
4135 pReg->au16[4] = UINT16_C(0xffff);
4136}
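/* The pattern stored above is the x87 "real indefinite" QNaN: sign=1,
   exponent=0x7fff, mantissa=0xc000000000000000, which is the value the FPU
   itself produces as the masked response to invalid operations. */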
4137
4138
4139/**
4140 * Updates the FOP, FPU.CS and FPUIP registers.
4141 *
4142 * @param pIemCpu The IEM per CPU data.
4143 * @param pCtx The CPU context.
4144 */
4145DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4146{
4147 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
4148 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
4149 /** @todo FPU.CS and FPUIP need to be kept separately. */
4150 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4151 {
4152 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
4153 * happens in real mode here based on the fnsave and fnstenv images. */
4154 pCtx->fpu.CS = 0;
4155 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
4156 }
4157 else
4158 {
4159 pCtx->fpu.CS = pCtx->cs.Sel;
4160 pCtx->fpu.FPUIP = pCtx->rip;
4161 }
4162}
4163
4164
4165/**
4166 * Updates the FPU.DS and FPUDP registers.
4167 *
4168 * @param pIemCpu The IEM per CPU data.
4169 * @param pCtx The CPU context.
4170 * @param iEffSeg The effective segment register.
4171 * @param GCPtrEff The effective address relative to @a iEffSeg.
4172 */
4173DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4174{
4175 RTSEL sel;
4176 switch (iEffSeg)
4177 {
4178 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
4179 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
4180 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
4181 case X86_SREG_ES: sel = pCtx->es.Sel; break;
4182 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
4183 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
4184 default:
4185 AssertMsgFailed(("%d\n", iEffSeg));
4186 sel = pCtx->ds.Sel;
4187 }
4188 /** @todo FPU.DS and FPUDP need to be kept separately. */
4189 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4190 {
4191 pCtx->fpu.DS = 0;
4192 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
4193 }
4194 else
4195 {
4196 pCtx->fpu.DS = sel;
4197 pCtx->fpu.FPUDP = GCPtrEff;
4198 }
4199}
4200
4201
4202/**
4203 * Rotates the stack registers in the push direction.
4204 *
4205 * @param pCtx The CPU context.
4206 * @remarks This is a complete waste of time, but fxsave stores the registers in
4207 * stack order.
4208 */
4209DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
4210{
4211 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
4212 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
4213 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
4214 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
4215 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
4216 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
4217 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
4218 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
4219 pCtx->fpu.aRegs[0].r80 = r80Tmp;
4220}
4221
4222
4223/**
4224 * Rotates the stack registers in the pop direction.
4225 *
4226 * @param pCtx The CPU context.
4227 * @remarks This is a complete waste of time, but fxsave stores the registers in
4228 * stack order.
4229 */
4230DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
4231{
4232 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
4233 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
4234 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
4235 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
4236 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
4237 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
4238 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
4239 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
4240 pCtx->fpu.aRegs[7].r80 = r80Tmp;
4241}
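/* In other words: the push rotation moves aRegs[i] into aRegs[i + 1] so that
   ST(0) always lives in aRegs[0], and the pop rotation above moves
   aRegs[i + 1] back into aRegs[i]; the TOP field in FSW is adjusted separately
   by the callers. */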
4242
4243
4244/**
4245 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4246 * exception prevents it.
4247 *
4248 * @param pIemCpu The IEM per CPU data.
4249 * @param pResult The FPU operation result to push.
4250 * @param pCtx The CPU context.
4251 */
4252static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
4253{
4254 /* Update FSW and bail if there are pending exceptions afterwards. */
4255 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4256 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4257 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4258 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4259 {
4260 pCtx->fpu.FSW = fFsw;
4261 return;
4262 }
4263
4264 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4265 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4266 {
4267 /* All is fine, push the actual value. */
4268 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4269 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
4270 }
4271 else if (pCtx->fpu.FCW & X86_FCW_IM)
4272 {
4273 /* Masked stack overflow, push QNaN. */
4274 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4275 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4276 }
4277 else
4278 {
4279 /* Raise stack overflow, don't push anything. */
4280 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4281 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4282 return;
4283 }
4284
4285 fFsw &= ~X86_FSW_TOP_MASK;
4286 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4287 pCtx->fpu.FSW = fFsw;
4288
4289 iemFpuRotateStackPush(pCtx);
4290}
4291
4292
4293/**
4294 * Stores a result in a FPU register and updates the FSW and FTW.
4295 *
4296 * @param pIemCpu The IEM per CPU data.
4297 * @param pResult The result to store.
4298 * @param iStReg Which FPU register to store it in.
4299 * @param pCtx The CPU context.
4300 */
4301static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
4302{
4303 Assert(iStReg < 8);
4304 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4305 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4306 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4307 pCtx->fpu.FTW |= RT_BIT(iReg);
4308 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
4309}
4310
4311
4312/**
4313 * Only updates the FPU status word (FSW) with the result of the current
4314 * instruction.
4315 *
4316 * @param pCtx The CPU context.
4317 * @param u16FSW The FSW output of the current instruction.
4318 */
4319static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
4320{
4321 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4322 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4323}
4324
4325
4326/**
4327 * Pops one item off the FPU stack if no pending exception prevents it.
4328 *
4329 * @param pCtx The CPU context.
4330 */
4331static void iemFpuMaybePopOne(PCPUMCTX pCtx)
4332{
4333 /* Check pending exceptions. */
4334 uint16_t uFSW = pCtx->fpu.FSW;
4335 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4336 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4337 return;
4338
4339 /* TOP++ (the pop moves the top-of-stack to the next register). */
4340 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4341 uFSW &= ~X86_FSW_TOP_MASK;
4342 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4343 pCtx->fpu.FSW = uFSW;
4344
4345 /* Mark the previous ST0 as empty. */
4346 iOldTop >>= X86_FSW_TOP_SHIFT;
4347 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
4348
4349 /* Rotate the registers. */
4350 iemFpuRotateStackPop(pCtx);
4351}
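/* The +9 is simply +1 modulo 8 once the sum is masked back into the 3-bit TOP
   field, e.g. TOP=7 becomes (7 + 9) & 7 = 0, wrapping around the register
   stack. */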
4352
4353
4354/**
4355 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4356 *
4357 * @param pIemCpu The IEM per CPU data.
4358 * @param pResult The FPU operation result to push.
4359 */
4360static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
4361{
4362 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4363 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4364 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4365}
4366
4367
4368/**
4369 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4370 * and sets FPUDP and FPUDS.
4371 *
4372 * @param pIemCpu The IEM per CPU data.
4373 * @param pResult The FPU operation result to push.
4374 * @param iEffSeg The effective segment register.
4375 * @param GCPtrEff The effective address relative to @a iEffSeg.
4376 */
4377static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4378{
4379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4380 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4381 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4382 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4383}
4384
4385
4386/**
4387 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4388 * unless a pending exception prevents it.
4389 *
4390 * @param pIemCpu The IEM per CPU data.
4391 * @param pResult The FPU operation result to store and push.
4392 */
4393static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
4394{
4395 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4396 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4397
4398 /* Update FSW and bail if there are pending exceptions afterwards. */
4399 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4400 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4401 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4402 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4403 {
4404 pCtx->fpu.FSW = fFsw;
4405 return;
4406 }
4407
4408 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4409 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4410 {
4411 /* All is fine, push the actual value. */
4412 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4413 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
4414 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
4415 }
4416 else if (pCtx->fpu.FCW & X86_FCW_IM)
4417 {
4418 /* Masked stack overflow, push QNaN. */
4419 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4420 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4421 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4422 }
4423 else
4424 {
4425 /* Raise stack overflow, don't push anything. */
4426 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4427 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4428 return;
4429 }
4430
4431 fFsw &= ~X86_FSW_TOP_MASK;
4432 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4433 pCtx->fpu.FSW = fFsw;
4434
4435 iemFpuRotateStackPush(pCtx);
4436}
4437
4438
4439/**
4440 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4441 * FOP.
4442 *
4443 * @param pIemCpu The IEM per CPU data.
4444 * @param pResult The result to store.
4445 * @param iStReg Which FPU register to store it in.
4447 */
4448static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4449{
4450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4451 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4452 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4453}
4454
4455
4456/**
4457 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4458 * FOP, and then pops the stack.
4459 *
4460 * @param pIemCpu The IEM per CPU data.
4461 * @param pResult The result to store.
4462 * @param iStReg Which FPU register to store it in.
4464 */
4465static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4466{
4467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4468 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4469 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4470 iemFpuMaybePopOne(pCtx);
4471}
4472
4473
4474/**
4475 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4476 * FPUDP, and FPUDS.
4477 *
4478 * @param pIemCpu The IEM per CPU data.
4479 * @param pResult The result to store.
4480 * @param iStReg Which FPU register to store it in.
4482 * @param iEffSeg The effective memory operand selector register.
4483 * @param GCPtrEff The effective memory operand offset.
4484 */
4485static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4486{
4487 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4488 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
4489 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4490 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4491}
4492
4493
4494/**
4495 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4496 * FPUDP, and FPUDS, and then pops the stack.
4497 *
4498 * @param pIemCpu The IEM per CPU data.
4499 * @param pResult The result to store.
4500 * @param iStReg Which FPU register to store it in.
4502 * @param iEffSeg The effective memory operand selector register.
4503 * @param GCPtrEff The effective memory operand offset.
4504 */
4505static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4506 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4507{
4508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4509 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4510 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4511 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4512 iemFpuMaybePopOne(pCtx);
4513}
4514
4515
4516/**
4517 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4518 *
4519 * @param pIemCpu The IEM per CPU data.
4520 */
4521static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4522{
4523 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4524}
4525
4526
4527/**
4528 * Marks the specified stack register as free (for FFREE).
4529 *
4530 * @param pIemCpu The IEM per CPU data.
4531 * @param iStReg The register to free.
4532 */
4533static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4534{
4535 Assert(iStReg < 8);
4536 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4537 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4538 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4539}
4540
4541
4542/**
4543 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4544 *
4545 * @param pIemCpu The IEM per CPU data.
4546 */
4547static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4548{
4549 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4550 uint16_t uFsw = pCtx->fpu.FSW;
4551 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4552 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4553 uFsw &= ~X86_FSW_TOP_MASK;
4554 uFsw |= uTop;
4555 pCtx->fpu.FSW = uFsw;
4556}
4557
4558
4559/**
4560 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4561 *
4562 * @param pIemCpu The IEM per CPU data.
4563 */
4564static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4565{
4566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4567 uint16_t uFsw = pCtx->fpu.FSW;
4568 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4569 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4570 uFsw &= ~X86_FSW_TOP_MASK;
4571 uFsw |= uTop;
4572 pCtx->fpu.FSW = uFsw;
4573}
4574
4575
4576/**
4577 * Updates the FSW, FOP, FPUIP, and FPUCS.
4578 *
4579 * @param pIemCpu The IEM per CPU data.
4580 * @param u16FSW The FSW from the current instruction.
4581 */
4582static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4583{
4584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4585 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4586 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4587}
4588
4589
4590/**
4591 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4592 *
4593 * @param pIemCpu The IEM per CPU data.
4594 * @param u16FSW The FSW from the current instruction.
4595 */
4596static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4597{
4598 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4599 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4600 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4601 iemFpuMaybePopOne(pCtx);
4602}
4603
4604
4605/**
4606 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4607 *
4608 * @param pIemCpu The IEM per CPU data.
4609 * @param u16FSW The FSW from the current instruction.
4610 * @param iEffSeg The effective memory operand selector register.
4611 * @param GCPtrEff The effective memory operand offset.
4612 */
4613static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4614{
4615 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4616 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4617 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4618 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4619}
4620
4621
4622/**
4623 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4624 *
4625 * @param pIemCpu The IEM per CPU data.
4626 * @param u16FSW The FSW from the current instruction.
4627 */
4628static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4629{
4630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4631 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4632 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4633 iemFpuMaybePopOne(pCtx);
4634 iemFpuMaybePopOne(pCtx);
4635}
4636
4637
4638/**
4639 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4640 *
4641 * @param pIemCpu The IEM per CPU data.
4642 * @param u16FSW The FSW from the current instruction.
4643 * @param iEffSeg The effective memory operand selector register.
4644 * @param GCPtrEff The effective memory operand offset.
4645 */
4646static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4647{
4648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4649 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4650 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4651 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4652 iemFpuMaybePopOne(pCtx);
4653}
4654
4655
4656/**
4657 * Worker routine for raising an FPU stack underflow exception.
4658 *
4659 * @param pIemCpu The IEM per CPU data.
4660 * @param iStReg The stack register being accessed.
4661 * @param pCtx The CPU context.
4662 */
4663static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4664{
4665 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4666 if (pCtx->fpu.FCW & X86_FCW_IM)
4667 {
4668 /* Masked underflow. */
4669 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4670 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4671 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4672 if (iStReg != UINT8_MAX)
4673 {
4674 pCtx->fpu.FTW |= RT_BIT(iReg);
4675 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4676 }
4677 }
4678 else
4679 {
4680 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4681 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4682 }
4683}
4684
4685
4686/**
4687 * Raises a FPU stack underflow exception.
4688 *
4689 * @param pIemCpu The IEM per CPU data.
4690 * @param iStReg The destination register that should be loaded
4691 * with QNaN if \#IS is masked. Specify
4692 * UINT8_MAX if none (like for fcom).
4693 */
4694DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4695{
4696 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4697 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4698 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4699}
4700
4701
4702DECL_NO_INLINE(static, void)
4703iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4704{
4705 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4706 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4707 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4708 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4709}
4710
4711
4712DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4713{
4714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4715 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4716 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4717 iemFpuMaybePopOne(pCtx);
4718}
4719
4720
4721DECL_NO_INLINE(static, void)
4722iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4723{
4724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4725 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4726 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4727 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4728 iemFpuMaybePopOne(pCtx);
4729}
4730
4731
4732DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4733{
4734 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4735 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4736 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4737 iemFpuMaybePopOne(pCtx);
4738 iemFpuMaybePopOne(pCtx);
4739}
4740
4741
4742DECL_NO_INLINE(static, void)
4743iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4744{
4745 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4746 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4747
4748 if (pCtx->fpu.FCW & X86_FCW_IM)
4749 {
4750 /* Masked underflow - push QNaN. */
4751 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4752 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4753 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4754 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4755 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4756 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4757 iemFpuRotateStackPush(pCtx);
4758 }
4759 else
4760 {
4761 /* Exception pending - don't change TOP or the register stack. */
4762 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4763 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4764 }
4765}
4766
4767
4768DECL_NO_INLINE(static, void)
4769iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4770{
4771 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4772 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4773
4774 if (pCtx->fpu.FCW & X86_FCW_IM)
4775 {
4776 /* Masked underflow - push QNaN. */
4777 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4778 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4779 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4780 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4781 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4782 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4783 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4784 iemFpuRotateStackPush(pCtx);
4785 }
4786 else
4787 {
4788 /* Exception pending - don't change TOP or the register stack. */
4789 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4790 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4791 }
4792}
4793
4794
4795/**
4796 * Worker routine for raising an FPU stack overflow exception on a push.
4797 *
4798 * @param pIemCpu The IEM per CPU data.
4799 * @param pCtx The CPU context.
4800 */
4801static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4802{
4803 if (pCtx->fpu.FCW & X86_FCW_IM)
4804 {
4805 /* Masked overflow. */
4806 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4807 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4808 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4809 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4810 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4811 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4812 iemFpuRotateStackPush(pCtx);
4813 }
4814 else
4815 {
4816 /* Exception pending - don't change TOP or the register stack. */
4817 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4818 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4819 }
4820}
4821
4822
4823/**
4824 * Raises a FPU stack overflow exception on a push.
4825 *
4826 * @param pIemCpu The IEM per CPU data.
4827 */
4828DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4829{
4830 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4831 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4832 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4833}
4834
4835
4836/**
4837 * Raises a FPU stack overflow exception on a push with a memory operand.
4838 *
4839 * @param pIemCpu The IEM per CPU data.
4840 * @param iEffSeg The effective memory operand selector register.
4841 * @param GCPtrEff The effective memory operand offset.
4842 */
4843DECL_NO_INLINE(static, void)
4844iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4845{
4846 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4847 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4848 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4849 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4850}
4851
4852
4853static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4854{
4855 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4856 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4857 if (pCtx->fpu.FTW & RT_BIT(iReg))
4858 return VINF_SUCCESS;
4859 return VERR_NOT_FOUND;
4860}
4861
4862
4863static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4864{
4865 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4866 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4867 if (pCtx->fpu.FTW & RT_BIT(iReg))
4868 {
4869 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4870 return VINF_SUCCESS;
4871 }
4872 return VERR_NOT_FOUND;
4873}
4874
4875
4876static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4877 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4878{
4879 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4880 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4881 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4882 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4883 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4884 {
4885 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4886 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4887 return VINF_SUCCESS;
4888 }
4889 return VERR_NOT_FOUND;
4890}
4891
4892
4893static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4894{
4895 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4896 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4897 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4898 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4899 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4900 {
4901 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4902 return VINF_SUCCESS;
4903 }
4904 return VERR_NOT_FOUND;
4905}
4906
4907
4908/**
4909 * Updates the FPU exception status after FCW is changed.
4910 *
4911 * @param pCtx The CPU context.
4912 */
4913static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4914{
4915 uint16_t u16Fsw = pCtx->fpu.FSW;
4916 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4917 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4918 else
4919 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4920 pCtx->fpu.FSW = u16Fsw;
4921}
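/* Example: if FSW.IE is pending but FCW.IM now masks invalid-operation
   exceptions (and no other unmasked exception is pending), the ES and B
   summary bits are cleared above; unmasking it again re-asserts them. */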
4922
4923
4924/**
4925 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4926 *
4927 * @returns The full FTW.
4928 * @param pCtx The CPU state.
4929 */
4930static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4931{
4932 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4933 uint16_t u16Ftw = 0;
4934 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4935 for (unsigned iSt = 0; iSt < 8; iSt++)
4936 {
4937 unsigned const iReg = (iSt + iTop) & 7;
4938 if (!(u8Ftw & RT_BIT(iReg)))
4939 u16Ftw |= 3 << (iReg * 2); /* empty */
4940 else
4941 {
4942 uint16_t uTag;
4943 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4944 if (pr80Reg->s.uExponent == 0x7fff)
4945 uTag = 2; /* Exponent is all 1's => Special. */
4946 else if (pr80Reg->s.uExponent == 0x0000)
4947 {
4948 if (pr80Reg->s.u64Mantissa == 0x0000)
4949 uTag = 1; /* All bits are zero => Zero. */
4950 else
4951 uTag = 2; /* Must be special. */
4952 }
4953 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4954 uTag = 0; /* Valid. */
4955 else
4956 uTag = 2; /* Must be special. */
4957
4958 u16Ftw |= uTag << (iReg * 2);
4959 }
4960 }
4961
4962 return u16Ftw;
4963}
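/* The two-bit tags produced above follow the FNSTENV/FNSAVE encoding:
   00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal, unnormal),
   11 = empty. An entirely empty stack thus yields a full FTW of 0xffff, which
   iemFpuCompressFtw below folds back into 0. */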
4964
4965
4966/**
4967 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4968 *
4969 * @returns The compressed FTW.
4970 * @param u16FullFtw The full FTW to convert.
4971 */
4972static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4973{
4974 uint8_t u8Ftw = 0;
4975 for (unsigned i = 0; i < 8; i++)
4976 {
4977 if ((u16FullFtw & 3) != 3 /*empty*/)
4978 u8Ftw |= RT_BIT(i);
4979 u16FullFtw >>= 2;
4980 }
4981
4982 return u8Ftw;
4983}
4984
4985/** @} */
4986
4987
4988/** @name Memory access.
4989 *
4990 * @{
4991 */
4992
4993
4994/**
4995 * Updates the IEMCPU::cbWritten counter if applicable.
4996 *
4997 * @param pIemCpu The IEM per CPU data.
4998 * @param fAccess The access being accounted for.
4999 * @param cbMem The access size.
5000 */
5001DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
5002{
5003 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5004 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5005 pIemCpu->cbWritten += (uint32_t)cbMem;
5006}
5007
5008
5009/**
5010 * Checks if the given segment can be written to, raising the appropriate
5011 * exception if not.
5012 *
5013 * @returns VBox strict status code.
5014 *
5015 * @param pIemCpu The IEM per CPU data.
5016 * @param pHid Pointer to the hidden register.
5017 * @param iSegReg The register number.
5018 * @param pu64BaseAddr Where to return the base address to use for the
5019 * segment. (In 64-bit code it may differ from the
5020 * base in the hidden segment.)
5021 */
5022static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
5023{
5024 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5025 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
5026 else
5027 {
5028 if (!pHid->Attr.n.u1Present)
5029 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
5030
5031 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
5032 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5033 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
5034 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
5035 *pu64BaseAddr = pHid->u64Base;
5036 }
5037 return VINF_SUCCESS;
5038}
5039
5040
5041/**
5042 * Checks if the given segment can be read from, raising the appropriate
5043 * exception if not.
5044 *
5045 * @returns VBox strict status code.
5046 *
5047 * @param pIemCpu The IEM per CPU data.
5048 * @param pHid Pointer to the hidden register.
5049 * @param iSegReg The register number.
5050 * @param pu64BaseAddr Where to return the base address to use for the
5051 * segment. (In 64-bit code it may differ from the
5052 * base in the hidden segment.)
5053 */
5054static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
5055{
5056 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5057 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
5058 else
5059 {
5060 if (!pHid->Attr.n.u1Present)
5061 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
5062
5063 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
5064 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
5065 *pu64BaseAddr = pHid->u64Base;
5066 }
5067 return VINF_SUCCESS;
5068}
5069
5070
5071/**
5072 * Applies the segment limit, base and attributes.
5073 *
5074 * This may raise a \#GP or \#SS.
5075 *
5076 * @returns VBox strict status code.
5077 *
5078 * @param pIemCpu The IEM per CPU data.
5079 * @param fAccess The kind of access which is being performed.
5080 * @param iSegReg The index of the segment register to apply.
5081 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5082 * TSS, ++).
5083 * @param pGCPtrMem Pointer to the guest memory address to apply
5084 * segmentation to. Input and output parameter.
5085 */
5086static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
5087 size_t cbMem, PRTGCPTR pGCPtrMem)
5088{
5089 if (iSegReg == UINT8_MAX)
5090 return VINF_SUCCESS;
5091
5092 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
5093 switch (pIemCpu->enmCpuMode)
5094 {
5095 case IEMMODE_16BIT:
5096 case IEMMODE_32BIT:
5097 {
5098 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5099 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5100
5101 Assert(pSel->Attr.n.u1Present);
5102 Assert(pSel->Attr.n.u1DescType);
5103 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5104 {
5105 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5106 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5107 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
5108
5109 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5110 {
5111 /** @todo CPL check. */
5112 }
5113
5114 /*
5115 * There are two kinds of data selectors, normal and expand down.
5116 */
5117 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5118 {
5119 if ( GCPtrFirst32 > pSel->u32Limit
5120 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5121 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5122 }
5123 else
5124 {
5125 /*
5126 * The upper boundary is defined by the B bit, not the G bit!
5127 */
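                    /* For an expand-down segment the valid offsets are u32Limit+1
                       thru 0xffff, or thru 0xffffffff when the B bit is set, which
                       is what the check below enforces. */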
5128 if ( GCPtrFirst32 < pSel->u32Limit + 1
5129 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? 0xFFFFFFFF : 0xFFFF))
5130 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5131
5132 }
5133 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5134 }
5135 else
5136 {
5137
5138 /*
5139 * Code selectors can usually be read through (if the readable bit is
5140 * set); writing is only permitted in real and V8086 mode.
5141 */
5142 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5143 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5144 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5145 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
5146 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
5147
5148 if ( GCPtrFirst32 > pSel->u32Limit
5149 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5150 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5151
5152 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5153 {
5154 /** @todo CPL check. */
5155 }
5156
5157 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5158 }
5159 return VINF_SUCCESS;
5160 }
5161
5162 case IEMMODE_64BIT:
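            /* In 64-bit mode only FS and GS have a segment base applied; the other
               bases are treated as zero and no limit checking is done here. */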
5163 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5164 *pGCPtrMem += pSel->u64Base;
5165 return VINF_SUCCESS;
5166
5167 default:
5168 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
5169 }
5170}
5171
5172
5173/**
5174 * Translates a virtual address to a physical address and checks if we
5175 * can access the page as specified.
5176 *
5177 * @param pIemCpu The IEM per CPU data.
5178 * @param GCPtrMem The virtual address.
5179 * @param fAccess The intended access.
5180 * @param pGCPhysMem Where to return the physical address.
5181 */
5182static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
5183 PRTGCPHYS pGCPhysMem)
5184{
5185 /** @todo Need a different PGM interface here. We're currently using
5186 * generic / REM interfaces. this won't cut it for R0 & RC. */
5187 RTGCPHYS GCPhys;
5188 uint64_t fFlags;
5189 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
5190 if (RT_FAILURE(rc))
5191 {
5192 /** @todo Check unassigned memory in unpaged mode. */
5193 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5194 *pGCPhysMem = NIL_RTGCPHYS;
5195 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
5196 }
5197
5198 /* If the page is writable, user accessible and does not have the no-exec
5199 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
5200 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5201 {
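        /* Three cases need explicit checking: ring-3 writes to read-only pages
           (or ring-0 writes with CR0.WP set), ring-3 access to supervisor pages,
           and instruction fetches from no-execute pages. */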
5202 /* Write to read only memory? */
5203 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5204 && !(fFlags & X86_PTE_RW)
5205 && ( pIemCpu->uCpl != 0
5206 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
5207 {
5208 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5209 *pGCPhysMem = NIL_RTGCPHYS;
5210 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5211 }
5212
5213 /* Kernel memory accessed by userland? */
5214 if ( !(fFlags & X86_PTE_US)
5215 && pIemCpu->uCpl == 3
5216 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5217 {
5218 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5219 *pGCPhysMem = NIL_RTGCPHYS;
5220 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5221 }
5222
5223 /* Executing non-executable memory? */
5224 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5225 && (fFlags & X86_PTE_PAE_NX)
5226 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
5227 {
5228 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5229 *pGCPhysMem = NIL_RTGCPHYS;
5230 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5231 VERR_ACCESS_DENIED);
5232 }
5233 }
5234
5235 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
5236 *pGCPhysMem = GCPhys;
5237 return VINF_SUCCESS;
5238}
5239
5240
5241
5242/**
5243 * Maps a physical page.
5244 *
5245 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
5246 * @param pIemCpu The IEM per CPU data.
5247 * @param GCPhysMem The physical address.
5248 * @param fAccess The intended access.
5249 * @param ppvMem Where to return the mapping address.
5250 * @param pLock The PGM lock.
5251 */
5252static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
5253{
5254#ifdef IEM_VERIFICATION_MODE_FULL
5255 /* Force the alternative path so we can ignore writes. */
5256 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
5257 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5258#endif
5259#ifdef IEM_LOG_MEMORY_WRITES
5260 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5261 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5262#endif
5263#ifdef IEM_VERIFICATION_MODE_MINIMAL
5264 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5265#endif
5266
5267 /** @todo This API may require some improving later. A private deal with PGM
5268 * regarding locking and unlocking needs to be struck. A couple of TLBs
5269 * living in PGM, but with publicly accessible inlined access methods
5270 * could perhaps be an even better solution. */
5271 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
5272 GCPhysMem,
5273 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
5274 pIemCpu->fBypassHandlers,
5275 ppvMem,
5276 pLock);
5277 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
5278 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5279 return rc;
5280}
5281
5282
5283/**
5284 * Unmaps a page previously mapped by iemMemPageMap.
5285 *
5286 * @param pIemCpu The IEM per CPU data.
5287 * @param GCPhysMem The physical address.
5288 * @param fAccess The intended access.
5289 * @param pvMem What iemMemPageMap returned.
5290 * @param pLock The PGM lock.
5291 */
5292DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
5293{
5294 NOREF(pIemCpu);
5295 NOREF(GCPhysMem);
5296 NOREF(fAccess);
5297 NOREF(pvMem);
5298 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
5299}
5300
5301
5302/**
5303 * Looks up a memory mapping entry.
5304 *
5305 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5306 * @param pIemCpu The IEM per CPU data.
5307 * @param pvMem The memory address.
5308 * @param fAccess The access mode to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
5309 */
5310DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5311{
5312 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5313 if ( pIemCpu->aMemMappings[0].pv == pvMem
5314 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5315 return 0;
5316 if ( pIemCpu->aMemMappings[1].pv == pvMem
5317 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5318 return 1;
5319 if ( pIemCpu->aMemMappings[2].pv == pvMem
5320 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5321 return 2;
5322 return VERR_NOT_FOUND;
5323}
5324
5325
5326/**
5327 * Finds a free memmap entry when the iNextMapping hint doesn't yield one.
5328 *
5329 * @returns Memory mapping index, 1024 on failure.
5330 * @param pIemCpu The IEM per CPU data.
5331 */
5332static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
5333{
5334 /*
5335 * The easy case.
5336 */
5337 if (pIemCpu->cActiveMappings == 0)
5338 {
5339 pIemCpu->iNextMapping = 1;
5340 return 0;
5341 }
5342
5343 /* There should be enough mappings for all instructions. */
5344 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
5345
5346 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
5347 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5348 return i;
5349
5350 AssertFailedReturn(1024);
5351}
5352
5353
5354/**
5355 * Commits a bounce buffer that needs writing back and unmaps it.
5356 *
5357 * @returns Strict VBox status code.
5358 * @param pIemCpu The IEM per CPU data.
5359 * @param iMemMap The index of the buffer to commit.
5360 */
5361static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
5362{
5363 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5364 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5365
5366 /*
5367 * Do the writing.
5368 */
5369 int rc;
5370#ifndef IEM_VERIFICATION_MODE_MINIMAL
5371 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
5372 && !IEM_VERIFICATION_ENABLED(pIemCpu))
5373 {
5374 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5375 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5376 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5377 if (!pIemCpu->fBypassHandlers)
5378 {
5379 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5380 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5381 pbBuf,
5382 cbFirst);
5383 if (cbSecond && rc == VINF_SUCCESS)
5384 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5385 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5386 pbBuf + cbFirst,
5387 cbSecond);
5388 }
5389 else
5390 {
5391 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5392 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5393 pbBuf,
5394 cbFirst);
5395 if (cbSecond && rc == VINF_SUCCESS)
5396 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5397 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5398 pbBuf + cbFirst,
5399 cbSecond);
5400 }
5401 if (rc != VINF_SUCCESS)
5402 {
5403 /** @todo status code handling */
5404 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5405 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
5406 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5407 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5408 }
5409 }
5410 else
5411#endif
5412 rc = VINF_SUCCESS;
5413
5414#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5415 /*
5416 * Record the write(s).
5417 */
5418 if (!pIemCpu->fNoRem)
5419 {
5420 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5421 if (pEvtRec)
5422 {
5423 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5424 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
5425 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5426 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
5427 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
5428 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5429 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5430 }
5431 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5432 {
5433 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5434 if (pEvtRec)
5435 {
5436 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5437 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
5438 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5439 memcpy(pEvtRec->u.RamWrite.ab,
5440 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
5441 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
5442 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5443 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5444 }
5445 }
5446 }
5447#endif
5448#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
5449 if (rc == VINF_SUCCESS)
5450 {
5451 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5452 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
5453 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5454 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5455 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
5456 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
5457
5458 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5459 g_cbIemWrote = cbWrote;
5460 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5461 }
5462#endif
5463
5464 /*
5465 * Free the mapping entry.
5466 */
5467 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5468 Assert(pIemCpu->cActiveMappings != 0);
5469 pIemCpu->cActiveMappings--;
5470 return rc;
5471}
5472
5473
5474/**
5475 * iemMemMap worker that deals with a request crossing pages.
5476 */
5477static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
5478 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5479{
5480 /*
5481 * Do the address translations.
5482 */
5483 RTGCPHYS GCPhysFirst;
5484 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5485 if (rcStrict != VINF_SUCCESS)
5486 return rcStrict;
5487
5488/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
5489 * last byte. */
5490 RTGCPHYS GCPhysSecond;
5491 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
5492 if (rcStrict != VINF_SUCCESS)
5493 return rcStrict;
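    /* The translation above used the last byte of the access, so mask the
       result down to the start of the second page. */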
5494 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
5495
5496 /*
5497 * Read in the current memory content if it's a read, execute or partial
5498 * write access.
5499 */
5500 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5501 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
5502 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5503
5504 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5505 {
5506 int rc;
5507 if (!pIemCpu->fBypassHandlers)
5508 {
5509 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
5510 if (rc != VINF_SUCCESS)
5511 {
5512 /** @todo status code handling */
5513 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5514 return rc;
5515 }
5516 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5517 if (rc != VINF_SUCCESS)
5518 {
5519 /** @todo status code handling */
5520 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5521 return rc;
5522 }
5523 }
5524 else
5525 {
5526 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5527 if (rc != VINF_SUCCESS)
5528 {
5529 /** @todo status code handling */
5530 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5531 return rc;
5532 }
5533 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5534 if (rc != VINF_SUCCESS)
5535 {
5536 /** @todo status code handling */
5537 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5538 return rc;
5539 }
5540 }
5541
5542#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5543 if ( !pIemCpu->fNoRem
5544 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5545 {
5546 /*
5547 * Record the reads.
5548 */
5549 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5550 if (pEvtRec)
5551 {
5552 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5553 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5554 pEvtRec->u.RamRead.cb = cbFirstPage;
5555 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5556 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5557 }
5558 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5559 if (pEvtRec)
5560 {
5561 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5562 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5563 pEvtRec->u.RamRead.cb = cbSecondPage;
5564 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5565 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5566 }
5567 }
5568#endif
5569 }
5570#ifdef VBOX_STRICT
5571 else
5572 memset(pbBuf, 0xcc, cbMem);
5573#endif
5574#ifdef VBOX_STRICT
5575 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5576 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5577#endif
5578
5579 /*
5580 * Commit the bounce buffer entry.
5581 */
5582 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5583 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5584 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5585 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5586 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5587 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5588 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5589 pIemCpu->iNextMapping = iMemMap + 1;
5590 pIemCpu->cActiveMappings++;
5591
5592 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5593 *ppvMem = pbBuf;
5594 return VINF_SUCCESS;
5595}
5596
5597
5598/**
5599 * iemMemMap worker that deals with iemMemPageMap failures.
5600 */
5601static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5602 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5603{
5604 /*
5605 * Filter out conditions we can handle and the ones which shouldn't happen.
5606 */
5607 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5608 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5609 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5610 {
5611 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5612 return rcMap;
5613 }
5614 pIemCpu->cPotentialExits++;
5615
5616 /*
5617 * Read in the current memory content if it's a read, execute or partial
5618 * write access.
5619 */
5620 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5621 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5622 {
5623 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5624 memset(pbBuf, 0xff, cbMem);
5625 else
5626 {
5627 int rc;
5628 if (!pIemCpu->fBypassHandlers)
5629 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5630 else
5631 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5632 if (rc != VINF_SUCCESS)
5633 {
5634 /** @todo status code handling */
5635 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5636 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5637 return rc;
5638 }
5639 }
5640
5641#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5642 if ( !pIemCpu->fNoRem
5643 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5644 {
5645 /*
5646 * Record the read.
5647 */
5648 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5649 if (pEvtRec)
5650 {
5651 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5652 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5653 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5654 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5655 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5656 }
5657 }
5658#endif
5659 }
5660#ifdef VBOX_STRICT
5661 else
5662 memset(pbBuf, 0xcc, cbMem);
5663#endif
5664#ifdef VBOX_STRICT
5665 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5666 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5667#endif
5668
5669 /*
5670 * Commit the bounce buffer entry.
5671 */
5672 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5673 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5674 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5675 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5676 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5677 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5678 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5679 pIemCpu->iNextMapping = iMemMap + 1;
5680 pIemCpu->cActiveMappings++;
5681
5682 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5683 *ppvMem = pbBuf;
5684 return VINF_SUCCESS;
5685}
5686
5687
5688
5689/**
5690 * Maps the specified guest memory for the given kind of access.
5691 *
5692 * This may be using bounce buffering of the memory if it's crossing a page
5693 * boundary or if there is an access handler installed for any of it. Because
5694 * of lock prefix guarantees, we're in for some extra clutter when this
5695 * happens.
5696 *
5697 * This may raise a \#GP, \#SS, \#PF or \#AC.
5698 *
5699 * @returns VBox strict status code.
5700 *
5701 * @param pIemCpu The IEM per CPU data.
5702 * @param ppvMem Where to return the pointer to the mapped
5703 * memory.
5704 * @param cbMem The number of bytes to map. This is usually 1,
5705 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5706 * string operations it can be up to a page.
5707 * @param iSegReg The index of the segment register to use for
5708 * this access. The base and limits are checked.
5709 * Use UINT8_MAX to indicate that no segmentation
5710 * is required (for IDT, GDT and LDT accesses).
5711 * @param GCPtrMem The address of the guest memory.
5712 * @param a_fAccess How the memory is being accessed. The
5713 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5714 * how to map the memory, while the
5715 * IEM_ACCESS_WHAT_XXX bit is used when raising
5716 * exceptions.
5717 */
5718static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5719{
5720 /*
5721 * Check the input and figure out which mapping entry to use.
5722 */
5723 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */
5724 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5725
5726 unsigned iMemMap = pIemCpu->iNextMapping;
5727 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
5728 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5729 {
5730 iMemMap = iemMemMapFindFree(pIemCpu);
5731 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5732 }
5733
5734 /*
5735 * Map the memory, checking that we can actually access it. If something
5736 * slightly complicated happens, fall back on bounce buffering.
5737 */
5738 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5739 if (rcStrict != VINF_SUCCESS)
5740 return rcStrict;
5741
5742 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5743 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5744
5745 RTGCPHYS GCPhysFirst;
5746 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5747 if (rcStrict != VINF_SUCCESS)
5748 return rcStrict;
5749
5750 void *pvMem;
5751 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5752 if (rcStrict != VINF_SUCCESS)
5753 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5754
5755 /*
5756 * Fill in the mapping table entry.
5757 */
5758 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5759 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5760 pIemCpu->iNextMapping = iMemMap + 1;
5761 pIemCpu->cActiveMappings++;
5762
5763 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5764 *ppvMem = pvMem;
5765 return VINF_SUCCESS;
5766}
5767
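/*
 * Typical iemMemMap / iemMemCommitAndUnmap usage (a sketch only; the real
 * callers are the fetch, store, push and pop helpers further down):
 *
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 */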
5768
5769/**
5770 * Commits the guest memory if bounce buffered and unmaps it.
5771 *
5772 * @returns Strict VBox status code.
5773 * @param pIemCpu The IEM per CPU data.
5774 * @param pvMem The mapping.
5775 * @param fAccess The kind of access.
5776 */
5777static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5778{
5779 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5780 AssertReturn(iMemMap >= 0, iMemMap);
5781
5782 /* If it's bounce buffered, we may need to write back the buffer. */
5783 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5784 {
5785 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5786 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5787 }
5788 /* Otherwise unlock it. */
5789 else
5790 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5791
5792 /* Free the entry. */
5793 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5794 Assert(pIemCpu->cActiveMappings != 0);
5795 pIemCpu->cActiveMappings--;
5796 return VINF_SUCCESS;
5797}
5798
5799
5800/**
5801 * Rolls back mappings, releasing page locks and such.
5802 *
5803 * The caller shall only call this after checking cActiveMappings.
5804 *
5806 * @param pIemCpu The IEM per CPU data.
5807 */
5808static void iemMemRollback(PIEMCPU pIemCpu)
5809{
5810 Assert(pIemCpu->cActiveMappings > 0);
5811
5812 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
5813 while (iMemMap-- > 0)
5814 {
5815 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
5816 if (fAccess != IEM_ACCESS_INVALID)
5817 {
5818 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5819 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
5820 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5821 Assert(pIemCpu->cActiveMappings > 0);
5822 pIemCpu->cActiveMappings--;
5823 }
5824 }
5825}
5826
5827
5828/**
5829 * Fetches a data byte.
5830 *
5831 * @returns Strict VBox status code.
5832 * @param pIemCpu The IEM per CPU data.
5833 * @param pu8Dst Where to return the byte.
5834 * @param iSegReg The index of the segment register to use for
5835 * this access. The base and limits are checked.
5836 * @param GCPtrMem The address of the guest memory.
5837 */
5838static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5839{
5840 /* The lazy approach for now... */
5841 uint8_t const *pu8Src;
5842 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5843 if (rc == VINF_SUCCESS)
5844 {
5845 *pu8Dst = *pu8Src;
5846 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5847 }
5848 return rc;
5849}
5850
5851
5852/**
5853 * Fetches a data word.
5854 *
5855 * @returns Strict VBox status code.
5856 * @param pIemCpu The IEM per CPU data.
5857 * @param pu16Dst Where to return the word.
5858 * @param iSegReg The index of the segment register to use for
5859 * this access. The base and limits are checked.
5860 * @param GCPtrMem The address of the guest memory.
5861 */
5862static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5863{
5864 /* The lazy approach for now... */
5865 uint16_t const *pu16Src;
5866 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5867 if (rc == VINF_SUCCESS)
5868 {
5869 *pu16Dst = *pu16Src;
5870 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5871 }
5872 return rc;
5873}
5874
5875
5876/**
5877 * Fetches a data dword.
5878 *
5879 * @returns Strict VBox status code.
5880 * @param pIemCpu The IEM per CPU data.
5881 * @param pu32Dst Where to return the dword.
5882 * @param iSegReg The index of the segment register to use for
5883 * this access. The base and limits are checked.
5884 * @param GCPtrMem The address of the guest memory.
5885 */
5886static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5887{
5888 /* The lazy approach for now... */
5889 uint32_t const *pu32Src;
5890 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5891 if (rc == VINF_SUCCESS)
5892 {
5893 *pu32Dst = *pu32Src;
5894 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5895 }
5896 return rc;
5897}
5898
5899
5900#ifdef SOME_UNUSED_FUNCTION
5901/**
5902 * Fetches a data dword and sign extends it to a qword.
5903 *
5904 * @returns Strict VBox status code.
5905 * @param pIemCpu The IEM per CPU data.
5906 * @param pu64Dst Where to return the sign extended value.
5907 * @param iSegReg The index of the segment register to use for
5908 * this access. The base and limits are checked.
5909 * @param GCPtrMem The address of the guest memory.
5910 */
5911static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5912{
5913 /* The lazy approach for now... */
5914 int32_t const *pi32Src;
5915 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5916 if (rc == VINF_SUCCESS)
5917 {
5918 *pu64Dst = *pi32Src;
5919 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5920 }
5921#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5922 else
5923 *pu64Dst = 0;
5924#endif
5925 return rc;
5926}
5927#endif
5928
5929
5930/**
5931 * Fetches a data qword.
5932 *
5933 * @returns Strict VBox status code.
5934 * @param pIemCpu The IEM per CPU data.
5935 * @param pu64Dst Where to return the qword.
5936 * @param iSegReg The index of the segment register to use for
5937 * this access. The base and limits are checked.
5938 * @param GCPtrMem The address of the guest memory.
5939 */
5940static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5941{
5942 /* The lazy approach for now... */
5943 uint64_t const *pu64Src;
5944 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5945 if (rc == VINF_SUCCESS)
5946 {
5947 *pu64Dst = *pu64Src;
5948 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5949 }
5950 return rc;
5951}
5952
5953
5954/**
5955 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
5956 *
5957 * @returns Strict VBox status code.
5958 * @param pIemCpu The IEM per CPU data.
5959 * @param pu64Dst Where to return the qword.
5960 * @param iSegReg The index of the segment register to use for
5961 * this access. The base and limits are checked.
5962 * @param GCPtrMem The address of the guest memory.
5963 */
5964static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5965{
5966 /* The lazy approach for now... */
5967 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
5968 if (RT_UNLIKELY(GCPtrMem & 15))
5969 return iemRaiseGeneralProtectionFault0(pIemCpu);
5970
5971 uint64_t const *pu64Src;
5972 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5973 if (rc == VINF_SUCCESS)
5974 {
5975 *pu64Dst = *pu64Src;
5976 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5977 }
5978 return rc;
5979}
5980
5981
5982/**
5983 * Fetches a data tword.
5984 *
5985 * @returns Strict VBox status code.
5986 * @param pIemCpu The IEM per CPU data.
5987 * @param pr80Dst Where to return the tword.
5988 * @param iSegReg The index of the segment register to use for
5989 * this access. The base and limits are checked.
5990 * @param GCPtrMem The address of the guest memory.
5991 */
5992static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5993{
5994 /* The lazy approach for now... */
5995 PCRTFLOAT80U pr80Src;
5996 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5997 if (rc == VINF_SUCCESS)
5998 {
5999 *pr80Dst = *pr80Src;
6000 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6001 }
6002 return rc;
6003}
6004
6005
6006/**
6007 * Fetches a data dqword (double qword), generally SSE related.
6008 *
6009 * @returns Strict VBox status code.
6010 * @param pIemCpu The IEM per CPU data.
6011 * @param pu128Dst Where to return the dqword.
6012 * @param iSegReg The index of the segment register to use for
6013 * this access. The base and limits are checked.
6014 * @param GCPtrMem The address of the guest memory.
6015 */
6016static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6017{
6018 /* The lazy approach for now... */
6019 uint128_t const *pu128Src;
6020 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6021 if (rc == VINF_SUCCESS)
6022 {
6023 *pu128Dst = *pu128Src;
6024 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6025 }
6026 return rc;
6027}
6028
6029
6030/**
6031 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6032 * related.
6033 *
6034 * Raises \#GP(0) if not aligned.
6035 *
6036 * @returns Strict VBox status code.
6037 * @param pIemCpu The IEM per CPU data.
6038 * @param pu128Dst Where to return the dqword.
6039 * @param iSegReg The index of the segment register to use for
6040 * this access. The base and limits are checked.
6041 * @param GCPtrMem The address of the guest memory.
6042 */
6043static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6044{
6045 /* The lazy approach for now... */
6046 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6047 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6048 return iemRaiseGeneralProtectionFault0(pIemCpu);
6049
6050 uint128_t const *pu128Src;
6051 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6052 if (rc == VINF_SUCCESS)
6053 {
6054 *pu128Dst = *pu128Src;
6055 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6056 }
6057 return rc;
6058}
6059
6060
6061
6062
6063/**
6064 * Fetches a descriptor register (lgdt, lidt).
6065 *
6066 * @returns Strict VBox status code.
6067 * @param pIemCpu The IEM per CPU data.
6068 * @param pcbLimit Where to return the limit.
6069 * @param pGCPtrBase Where to return the base.
6070 * @param iSegReg The index of the segment register to use for
6071 * this access. The base and limits are checked.
6072 * @param GCPtrMem The address of the guest memory.
6073 * @param enmOpSize The effective operand size.
6074 */
6075static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
6076 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
6077{
6078 uint8_t const *pu8Src;
6079 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
6080 (void **)&pu8Src,
6081 enmOpSize == IEMMODE_64BIT
6082 ? 2 + 8
6083 : enmOpSize == IEMMODE_32BIT
6084 ? 2 + 4
6085 : 2 + 3,
6086 iSegReg,
6087 GCPtrMem,
6088 IEM_ACCESS_DATA_R);
6089 if (rcStrict == VINF_SUCCESS)
6090 {
6091 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
6092 switch (enmOpSize)
6093 {
6094 case IEMMODE_16BIT:
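            /* With a 16-bit operand size only a 24-bit base is loaded; the high
               byte read from memory is ignored and forced to zero. */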
6095 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
6096 break;
6097 case IEMMODE_32BIT:
6098 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
6099 break;
6100 case IEMMODE_64BIT:
6101 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
6102 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
6103 break;
6104
6105 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6106 }
6107 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6108 }
6109 return rcStrict;
6110}
6111
6112
6113
6114/**
6115 * Stores a data byte.
6116 *
6117 * @returns Strict VBox status code.
6118 * @param pIemCpu The IEM per CPU data.
6119 * @param iSegReg The index of the segment register to use for
6120 * this access. The base and limits are checked.
6121 * @param GCPtrMem The address of the guest memory.
6122 * @param u8Value The value to store.
6123 */
6124static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
6125{
6126 /* The lazy approach for now... */
6127 uint8_t *pu8Dst;
6128 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6129 if (rc == VINF_SUCCESS)
6130 {
6131 *pu8Dst = u8Value;
6132 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
6133 }
6134 return rc;
6135}
6136
6137
6138/**
6139 * Stores a data word.
6140 *
6141 * @returns Strict VBox status code.
6142 * @param pIemCpu The IEM per CPU data.
6143 * @param iSegReg The index of the segment register to use for
6144 * this access. The base and limits are checked.
6145 * @param GCPtrMem The address of the guest memory.
6146 * @param u16Value The value to store.
6147 */
6148static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
6149{
6150 /* The lazy approach for now... */
6151 uint16_t *pu16Dst;
6152 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6153 if (rc == VINF_SUCCESS)
6154 {
6155 *pu16Dst = u16Value;
6156 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
6157 }
6158 return rc;
6159}
6160
6161
6162/**
6163 * Stores a data dword.
6164 *
6165 * @returns Strict VBox status code.
6166 * @param pIemCpu The IEM per CPU data.
6167 * @param iSegReg The index of the segment register to use for
6168 * this access. The base and limits are checked.
6169 * @param GCPtrMem The address of the guest memory.
6170 * @param u32Value The value to store.
6171 */
6172static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
6173{
6174 /* The lazy approach for now... */
6175 uint32_t *pu32Dst;
6176 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6177 if (rc == VINF_SUCCESS)
6178 {
6179 *pu32Dst = u32Value;
6180 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
6181 }
6182 return rc;
6183}
6184
6185
6186/**
6187 * Stores a data qword.
6188 *
6189 * @returns Strict VBox status code.
6190 * @param pIemCpu The IEM per CPU data.
6191 * @param iSegReg The index of the segment register to use for
6192 * this access. The base and limits are checked.
6193 * @param GCPtrMem The address of the guest memory.
6194 * @param u64Value The value to store.
6195 */
6196static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
6197{
6198 /* The lazy approach for now... */
6199 uint64_t *pu64Dst;
6200 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6201 if (rc == VINF_SUCCESS)
6202 {
6203 *pu64Dst = u64Value;
6204 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
6205 }
6206 return rc;
6207}
6208
6209
6210/**
6211 * Stores a data dqword.
6212 *
6213 * @returns Strict VBox status code.
6214 * @param pIemCpu The IEM per CPU data.
6215 * @param iSegReg The index of the segment register to use for
6216 * this access. The base and limits are checked.
6217 * @param GCPtrMem The address of the guest memory.
6218 * @param u128Value The value to store.
6219 */
6220static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6221{
6222 /* The lazy approach for now... */
6223 uint128_t *pu128Dst;
6224 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6225 if (rc == VINF_SUCCESS)
6226 {
6227 *pu128Dst = u128Value;
6228 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6229 }
6230 return rc;
6231}
6232
6233
6234/**
6235 * Stores a data dqword, SSE aligned.
6236 *
6237 * @returns Strict VBox status code.
6238 * @param pIemCpu The IEM per CPU data.
6239 * @param iSegReg The index of the segment register to use for
6240 * this access. The base and limits are checked.
6241 * @param GCPtrMem The address of the guest memory.
6242 * @param u128Value The value to store.
6243 */
6244static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6245{
6246 /* The lazy approach for now... */
6247 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6248 return iemRaiseGeneralProtectionFault0(pIemCpu);
6249
6250 uint128_t *pu128Dst;
6251 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6252 if (rc == VINF_SUCCESS)
6253 {
6254 *pu128Dst = u128Value;
6255 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6256 }
6257 return rc;
6258}
6259
6260
6261/**
6262 * Stores a descriptor register (sgdt, sidt).
6263 *
6264 * @returns Strict VBox status code.
6265 * @param pIemCpu The IEM per CPU data.
6266 * @param cbLimit The limit.
6267 * @param GCPtrBase The base address.
6268 * @param iSegReg The index of the segment register to use for
6269 * this access. The base and limits are checked.
6270 * @param GCPtrMem The address of the guest memory.
6271 * @param enmOpSize The effective operand size.
6272 */
6273static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
6274 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
6275{
6276 uint8_t *pu8Src;
6277 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
6278 (void **)&pu8Src,
6279 enmOpSize == IEMMODE_64BIT
6280 ? 2 + 8
6281 : enmOpSize == IEMMODE_32BIT
6282 ? 2 + 4
6283 : 2 + 3,
6284 iSegReg,
6285 GCPtrMem,
6286 IEM_ACCESS_DATA_W);
6287 if (rcStrict == VINF_SUCCESS)
6288 {
6289 pu8Src[0] = RT_BYTE1(cbLimit);
6290 pu8Src[1] = RT_BYTE2(cbLimit);
6291 pu8Src[2] = RT_BYTE1(GCPtrBase);
6292 pu8Src[3] = RT_BYTE2(GCPtrBase);
6293 pu8Src[4] = RT_BYTE3(GCPtrBase);
6294 if (enmOpSize == IEMMODE_16BIT)
6295 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
6296 else
6297 {
6298 pu8Src[5] = RT_BYTE4(GCPtrBase);
6299 if (enmOpSize == IEMMODE_64BIT)
6300 {
6301 pu8Src[6] = RT_BYTE5(GCPtrBase);
6302 pu8Src[7] = RT_BYTE6(GCPtrBase);
6303 pu8Src[8] = RT_BYTE7(GCPtrBase);
6304 pu8Src[9] = RT_BYTE8(GCPtrBase);
6305 }
6306 }
6307 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
6308 }
6309 return rcStrict;
6310}
6311
6312
6313/**
6314 * Pushes a word onto the stack.
6315 *
6316 * @returns Strict VBox status code.
6317 * @param pIemCpu The IEM per CPU data.
6318 * @param u16Value The value to push.
6319 */
6320static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
6321{
6322 /* Decrement the stack pointer. */
6323 uint64_t uNewRsp;
6324 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6325 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
6326
6327 /* Write the word the lazy way. */
6328 uint16_t *pu16Dst;
6329 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6330 if (rc == VINF_SUCCESS)
6331 {
6332 *pu16Dst = u16Value;
6333 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6334 }
6335
6336 /* Commit the new RSP value unless an access handler made trouble. */
6337 if (rc == VINF_SUCCESS)
6338 pCtx->rsp = uNewRsp;
6339
6340 return rc;
6341}
6342
6343
6344/**
6345 * Pushes a dword onto the stack.
6346 *
6347 * @returns Strict VBox status code.
6348 * @param pIemCpu The IEM per CPU data.
6349 * @param u32Value The value to push.
6350 */
6351static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
6352{
6353 /* Decrement the stack pointer. */
6354 uint64_t uNewRsp;
6355 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6356 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
6357
6358 /* Write the dword the lazy way. */
6359 uint32_t *pu32Dst;
6360 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6361 if (rc == VINF_SUCCESS)
6362 {
6363 *pu32Dst = u32Value;
6364 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6365 }
6366
6367 /* Commit the new RSP value unless an access handler made trouble. */
6368 if (rc == VINF_SUCCESS)
6369 pCtx->rsp = uNewRsp;
6370
6371 return rc;
6372}
6373
6374
6375/**
6376 * Pushes a qword onto the stack.
6377 *
6378 * @returns Strict VBox status code.
6379 * @param pIemCpu The IEM per CPU data.
6380 * @param u64Value The value to push.
6381 */
6382static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
6383{
6384 /* Decrement the stack pointer. */
6385 uint64_t uNewRsp;
6386 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6387 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
6388
6389 /* Write the qword the lazy way. */
6390 uint64_t *pu64Dst;
6391 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6392 if (rc == VINF_SUCCESS)
6393 {
6394 *pu64Dst = u64Value;
6395 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6396 }
6397
6398 /* Commit the new RSP value unless an access handler made trouble. */
6399 if (rc == VINF_SUCCESS)
6400 pCtx->rsp = uNewRsp;
6401
6402 return rc;
6403}
6404
6405
6406/**
6407 * Pops a word from the stack.
6408 *
6409 * @returns Strict VBox status code.
6410 * @param pIemCpu The IEM per CPU data.
6411 * @param pu16Value Where to store the popped value.
6412 */
6413static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
6414{
6415 /* Increment the stack pointer. */
6416 uint64_t uNewRsp;
6417 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6418 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
6419
6420 /* Read the word the lazy way. */
6421 uint16_t const *pu16Src;
6422 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6423 if (rc == VINF_SUCCESS)
6424 {
6425 *pu16Value = *pu16Src;
6426 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6427
6428 /* Commit the new RSP value. */
6429 if (rc == VINF_SUCCESS)
6430 pCtx->rsp = uNewRsp;
6431 }
6432
6433 return rc;
6434}
6435
6436
6437/**
6438 * Pops a dword from the stack.
6439 *
6440 * @returns Strict VBox status code.
6441 * @param pIemCpu The IEM per CPU data.
6442 * @param pu32Value Where to store the popped value.
6443 */
6444static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
6445{
6446 /* Increment the stack pointer. */
6447 uint64_t uNewRsp;
6448 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6449 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
6450
6451 /* Read the dword the lazy way. */
6452 uint32_t const *pu32Src;
6453 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6454 if (rc == VINF_SUCCESS)
6455 {
6456 *pu32Value = *pu32Src;
6457 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6458
6459 /* Commit the new RSP value. */
6460 if (rc == VINF_SUCCESS)
6461 pCtx->rsp = uNewRsp;
6462 }
6463
6464 return rc;
6465}
6466
6467
6468/**
6469 * Pops a qword from the stack.
6470 *
6471 * @returns Strict VBox status code.
6472 * @param pIemCpu The IEM per CPU data.
6473 * @param pu64Value Where to store the popped value.
6474 */
6475static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
6476{
6477 /* Increment the stack pointer. */
6478 uint64_t uNewRsp;
6479 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6480 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
6481
6482 /* Read the qword the lazy way. */
6483 uint64_t const *pu64Src;
6484 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6485 if (rc == VINF_SUCCESS)
6486 {
6487 *pu64Value = *pu64Src;
6488 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6489
6490 /* Commit the new RSP value. */
6491 if (rc == VINF_SUCCESS)
6492 pCtx->rsp = uNewRsp;
6493 }
6494
6495 return rc;
6496}
6497
6498
6499/**
6500 * Pushes a word onto the stack, using a temporary stack pointer.
6501 *
6502 * @returns Strict VBox status code.
6503 * @param pIemCpu The IEM per CPU data.
6504 * @param u16Value The value to push.
6505 * @param pTmpRsp Pointer to the temporary stack pointer.
6506 */
6507static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
6508{
6509 /* Decrement the stack pointer. */
6510 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6511 RTUINT64U NewRsp = *pTmpRsp;
6512 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
6513
6514 /* Write the word the lazy way. */
6515 uint16_t *pu16Dst;
6516 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6517 if (rc == VINF_SUCCESS)
6518 {
6519 *pu16Dst = u16Value;
6520 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6521 }
6522
6523 /* Commit the new RSP value unless an access handler made trouble. */
6524 if (rc == VINF_SUCCESS)
6525 *pTmpRsp = NewRsp;
6526
6527 return rc;
6528}
6529
6530
6531/**
6532 * Pushes a dword onto the stack, using a temporary stack pointer.
6533 *
6534 * @returns Strict VBox status code.
6535 * @param pIemCpu The IEM per CPU data.
6536 * @param u32Value The value to push.
6537 * @param pTmpRsp Pointer to the temporary stack pointer.
6538 */
6539static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
6540{
6541 /* Decrement the stack pointer. */
6542 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6543 RTUINT64U NewRsp = *pTmpRsp;
6544 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
6545
6546 /* Write the dword the lazy way. */
6547 uint32_t *pu32Dst;
6548 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6549 if (rc == VINF_SUCCESS)
6550 {
6551 *pu32Dst = u32Value;
6552 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6553 }
6554
6555 /* Commit the new RSP value unless an access handler made trouble. */
6556 if (rc == VINF_SUCCESS)
6557 *pTmpRsp = NewRsp;
6558
6559 return rc;
6560}
6561
6562
6563/**
6564 * Pushes a qword onto the stack, using a temporary stack pointer.
6565 *
6566 * @returns Strict VBox status code.
6567 * @param pIemCpu The IEM per CPU data.
6568 * @param u64Value The value to push.
6569 * @param pTmpRsp Pointer to the temporary stack pointer.
6570 */
6571static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
6572{
6573 /* Decrement the stack pointer. */
6574 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6575 RTUINT64U NewRsp = *pTmpRsp;
6576 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
6577
6578 /* Write the qword the lazy way. */
6579 uint64_t *pu64Dst;
6580 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6581 if (rc == VINF_SUCCESS)
6582 {
6583 *pu64Dst = u64Value;
6584 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6585 }
6586
6587 /* Commit the new RSP value unless an access handler made trouble. */
6588 if (rc == VINF_SUCCESS)
6589 *pTmpRsp = NewRsp;
6590
6591 return rc;
6592}
6593
6594
6595/**
6596 * Pops a word from the stack, using a temporary stack pointer.
6597 *
6598 * @returns Strict VBox status code.
6599 * @param pIemCpu The IEM per CPU data.
6600 * @param pu16Value Where to store the popped value.
6601 * @param pTmpRsp Pointer to the temporary stack pointer.
6602 */
6603static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
6604{
6605 /* Increment the stack pointer. */
6606 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6607 RTUINT64U NewRsp = *pTmpRsp;
6608 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
6609
6610 /* Read the word the lazy way. */
6611 uint16_t const *pu16Src;
6612 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6613 if (rc == VINF_SUCCESS)
6614 {
6615 *pu16Value = *pu16Src;
6616 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6617
6618 /* Commit the new RSP value. */
6619 if (rc == VINF_SUCCESS)
6620 *pTmpRsp = NewRsp;
6621 }
6622
6623 return rc;
6624}
6625
6626
6627/**
6628 * Pops a dword from the stack, using a temporary stack pointer.
6629 *
6630 * @returns Strict VBox status code.
6631 * @param pIemCpu The IEM per CPU data.
6632 * @param pu32Value Where to store the popped value.
6633 * @param pTmpRsp Pointer to the temporary stack pointer.
6634 */
6635static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
6636{
6637 /* Increment the stack pointer. */
6638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6639 RTUINT64U NewRsp = *pTmpRsp;
6640 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
6641
6642 /* Fetch the dword the lazy way. */
6643 uint32_t const *pu32Src;
6644 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6645 if (rc == VINF_SUCCESS)
6646 {
6647 *pu32Value = *pu32Src;
6648 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6649
6650 /* Commit the new RSP value. */
6651 if (rc == VINF_SUCCESS)
6652 *pTmpRsp = NewRsp;
6653 }
6654
6655 return rc;
6656}
6657
6658
6659/**
6660 * Pops a qword from the stack, using a temporary stack pointer.
6661 *
6662 * @returns Strict VBox status code.
6663 * @param pIemCpu The IEM per CPU data.
6664 * @param pu64Value Where to store the popped value.
6665 * @param pTmpRsp Pointer to the temporary stack pointer.
6666 */
6667static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
6668{
6669 /* Increment the stack pointer. */
6670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6671 RTUINT64U NewRsp = *pTmpRsp;
6672 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6673
6674 /* Fetch the qword the lazy way. */
6675 uint64_t const *pu64Src;
6676 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6677 if (rcStrict == VINF_SUCCESS)
6678 {
6679 *pu64Value = *pu64Src;
6680 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6681
6682 /* Commit the new RSP value. */
6683 if (rcStrict == VINF_SUCCESS)
6684 *pTmpRsp = NewRsp;
6685 }
6686
6687 return rcStrict;
6688}
6689
6690
6691/**
6692 * Begin a special stack push (used by interrupts, exceptions and such).
6693 *
6694 * This will raise \#SS or \#PF if appropriate.
6695 *
6696 * @returns Strict VBox status code.
6697 * @param pIemCpu The IEM per CPU data.
6698 * @param cbMem The number of bytes to push onto the stack.
6699 * @param ppvMem Where to return the pointer to the stack memory.
6700 * As with the other memory functions this could be
6701 * direct access or bounce buffered access, so
6702 * don't commit the RSP value until the commit call
6703 * succeeds.
6704 * @param puNewRsp Where to return the new RSP value. This must be
6705 * passed unchanged to
6706 * iemMemStackPushCommitSpecial().
6707 */
6708static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6709{
6710 Assert(cbMem < UINT8_MAX);
6711 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6712 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6713 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6714}
6715
6716
6717/**
6718 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6719 *
6720 * This will update the rSP.
6721 *
6722 * @returns Strict VBox status code.
6723 * @param pIemCpu The IEM per CPU data.
6724 * @param pvMem The pointer returned by
6725 * iemMemStackPushBeginSpecial().
6726 * @param uNewRsp The new RSP value returned by
6727 * iemMemStackPushBeginSpecial().
6728 */
6729static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6730{
6731 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6732 if (rcStrict == VINF_SUCCESS)
6733 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6734 return rcStrict;
6735}
6736
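/*
 * Illustrative sketch (not part of the original file): a caller, e.g. exception
 * dispatch code, would pair iemMemStackPushBeginSpecial/CommitSpecial roughly
 * like this.  The function name and the error code parameter are made up for
 * the example; only the calls shown above are assumed.
 */
#if 0
static VBOXSTRICTRC iemExamplePushErrCode32(PIEMCPU pIemCpu, uint32_t uErrCode)
{
    uint32_t    *pu32Dst;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, sizeof(uint32_t), (void **)&pu32Dst, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu32Dst = uErrCode;                          /* may be direct or bounce buffered memory */
    return iemMemStackPushCommitSpecial(pIemCpu, pu32Dst, uNewRsp); /* commits memory + RSP */
}
#endif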
6737
6738/**
6739 * Begin a special stack pop (used by iret, retf and such).
6740 *
6741 * This will raise \#SS or \#PF if appropriate.
6742 *
6743 * @returns Strict VBox status code.
6744 * @param pIemCpu The IEM per CPU data.
6745 * @param cbMem The number of bytes to pop off the stack.
6746 * @param ppvMem Where to return the pointer to the stack memory.
6747 * @param puNewRsp Where to return the new RSP value. This must be
6748 * passed unchanged to
6749 * iemMemStackPopCommitSpecial() or applied
6750 * manually if iemMemStackPopDoneSpecial() is used.
6751 */
6752static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6753{
6754 Assert(cbMem < UINT8_MAX);
6755 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6756 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6757 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6758}
6759
6760
6761/**
6762 * Continue a special stack pop (used by iret and retf).
6763 *
6764 * This will raise \#SS or \#PF if appropriate.
6765 *
6766 * @returns Strict VBox status code.
6767 * @param pIemCpu The IEM per CPU data.
6768 * @param cbMem The number of bytes to pop off the stack.
6769 * @param ppvMem Where to return the pointer to the stack memory.
6770 * @param puNewRsp Where to return the new RSP value. This must be
6771 * passed unchanged to
6772 * iemMemStackPopCommitSpecial() or applied
6773 * manually if iemMemStackPopDoneSpecial() is used.
6774 */
6775static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6776{
6777 Assert(cbMem < UINT8_MAX);
6778 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6779 RTUINT64U NewRsp;
6780 NewRsp.u = *puNewRsp;
6781 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6782 *puNewRsp = NewRsp.u;
6783 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6784}
6785
6786
6787/**
6788 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6789 *
6790 * This will update the rSP.
6791 *
6792 * @returns Strict VBox status code.
6793 * @param pIemCpu The IEM per CPU data.
6794 * @param pvMem The pointer returned by
6795 * iemMemStackPopBeginSpecial().
6796 * @param uNewRsp The new RSP value returned by
6797 * iemMemStackPopBeginSpecial().
6798 */
6799static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6800{
6801 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6802 if (rcStrict == VINF_SUCCESS)
6803 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6804 return rcStrict;
6805}
6806
6807
6808/**
6809 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6810 * iemMemStackPopContinueSpecial).
6811 *
6812 * The caller will manually commit the rSP.
6813 *
6814 * @returns Strict VBox status code.
6815 * @param pIemCpu The IEM per CPU data.
6816 * @param pvMem The pointer returned by
6817 * iemMemStackPopBeginSpecial() or
6818 * iemMemStackPopContinueSpecial().
6819 */
6820static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6821{
6822 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6823}
6824
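/*
 * Illustrative sketch (not part of the original file): an iret/retf style
 * implementation would use the special pop helpers above roughly like this,
 * committing RSP by hand after the done call.  The function and variable
 * names are made up for the example.
 */
#if 0
static VBOXSTRICTRC iemExamplePopRetFrame32(PIEMCPU pIemCpu, uint32_t *puEip, uint16_t *puCs)
{
    uint32_t const *pau32Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 2 * sizeof(uint32_t),
                                                          (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puEip = pau32Frame[0];
    *puCs  = (uint16_t)pau32Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pau32Frame);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;   /* the caller commits RSP manually */
    return rcStrict;
}
#endif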
6825
6826/**
6827 * Fetches a system table byte.
6828 *
6829 * @returns Strict VBox status code.
6830 * @param pIemCpu The IEM per CPU data.
6831 * @param pbDst Where to return the byte.
6832 * @param iSegReg The index of the segment register to use for
6833 * this access. The base and limits are checked.
6834 * @param GCPtrMem The address of the guest memory.
6835 */
6836static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6837{
6838 /* The lazy approach for now... */
6839 uint8_t const *pbSrc;
6840 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6841 if (rc == VINF_SUCCESS)
6842 {
6843 *pbDst = *pbSrc;
6844 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
6845 }
6846 return rc;
6847}
6848
6849
6850/**
6851 * Fetches a system table word.
6852 *
6853 * @returns Strict VBox status code.
6854 * @param pIemCpu The IEM per CPU data.
6855 * @param pu16Dst Where to return the word.
6856 * @param iSegReg The index of the segment register to use for
6857 * this access. The base and limits are checked.
6858 * @param GCPtrMem The address of the guest memory.
6859 */
6860static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6861{
6862 /* The lazy approach for now... */
6863 uint16_t const *pu16Src;
6864 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6865 if (rc == VINF_SUCCESS)
6866 {
6867 *pu16Dst = *pu16Src;
6868 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
6869 }
6870 return rc;
6871}
6872
6873
6874/**
6875 * Fetches a system table dword.
6876 *
6877 * @returns Strict VBox status code.
6878 * @param pIemCpu The IEM per CPU data.
6879 * @param pu32Dst Where to return the dword.
6880 * @param iSegReg The index of the segment register to use for
6881 * this access. The base and limits are checked.
6882 * @param GCPtrMem The address of the guest memory.
6883 */
6884static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6885{
6886 /* The lazy approach for now... */
6887 uint32_t const *pu32Src;
6888 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6889 if (rc == VINF_SUCCESS)
6890 {
6891 *pu32Dst = *pu32Src;
6892 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6893 }
6894 return rc;
6895}
6896
6897
6898/**
6899 * Fetches a system table qword.
6900 *
6901 * @returns Strict VBox status code.
6902 * @param pIemCpu The IEM per CPU data.
6903 * @param pu64Dst Where to return the qword.
6904 * @param iSegReg The index of the segment register to use for
6905 * this access. The base and limits are checked.
6906 * @param GCPtrMem The address of the guest memory.
6907 */
6908static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6909{
6910 /* The lazy approach for now... */
6911 uint64_t const *pu64Src;
6912 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6913 if (rc == VINF_SUCCESS)
6914 {
6915 *pu64Dst = *pu64Src;
6916 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6917 }
6918 return rc;
6919}
6920
6921
6922/**
6923 * Fetches a descriptor table entry.
6924 *
6925 * @returns Strict VBox status code.
6926 * @param pIemCpu The IEM per CPU data.
6927 * @param pDesc Where to return the descriptor table entry.
6928 * @param uSel The selector which table entry to fetch.
6929 * @param uXcpt The exception to raise on table lookup error.
6930 */
6931static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
6932{
6933 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6934
6935 /** @todo did the 286 require all 8 bytes to be accessible? */
6936 /*
6937 * Get the selector table base and check bounds.
6938 */
6939 RTGCPTR GCPtrBase;
6940 if (uSel & X86_SEL_LDT)
6941 {
6942 if ( !pCtx->ldtr.Attr.n.u1Present
6943 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6944 {
6945 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6946 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6947 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6948 uSel & ~X86_SEL_RPL, 0);
6949 }
6950
6951 Assert(pCtx->ldtr.Attr.n.u1Present);
6952 GCPtrBase = pCtx->ldtr.u64Base;
6953 }
6954 else
6955 {
6956 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6957 {
6958 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6959 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6960 uSel & ~X86_SEL_RPL, 0);
6961 }
6962 GCPtrBase = pCtx->gdtr.pGdt;
6963 }
6964
6965 /*
6966 * Read the legacy descriptor and maybe the long mode extensions if
6967 * required.
6968 */
6969 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6970 if (rcStrict == VINF_SUCCESS)
6971 {
6972 if ( !IEM_IS_LONG_MODE(pIemCpu)
6973 || pDesc->Legacy.Gen.u1DescType)
6974 pDesc->Long.au64[1] = 0;
6975 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6976 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6977 else
6978 {
6979 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6980 /** @todo is this the right exception? */
6981 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6982 uSel & ~X86_SEL_RPL, 0);
6983 }
6984 }
6985 return rcStrict;
6986}
6987
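/*
 * Illustrative sketch (not part of the original file): how a caller might fetch
 * and sanity check a descriptor with iemMemFetchSelDesc.  uNewCs is a
 * hypothetical selector variable and the checks are simplified for the example
 * (a not-present segment would normally raise \#NP rather than \#GP(0)).
 */
#if 0
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewCs, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                          /* #GP/#PF already raised by the fetcher */
    if (   !Desc.Legacy.Gen.u1DescType            /* must be a code/data descriptor, not system */
        || !Desc.Legacy.Gen.u1Present)            /* and it must be present */
        return iemRaiseGeneralProtectionFault0(pIemCpu);
#endif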
6988
6989/**
6990 * Fakes a long mode stack selector for SS = 0.
6991 *
6992 * @param pDescSs Where to return the fake stack descriptor.
6993 * @param uDpl The DPL we want.
6994 */
6995static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6996{
6997 pDescSs->Long.au64[0] = 0;
6998 pDescSs->Long.au64[1] = 0;
6999 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
7000 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
7001 pDescSs->Long.Gen.u2Dpl = uDpl;
7002 pDescSs->Long.Gen.u1Present = 1;
7003 pDescSs->Long.Gen.u1Long = 1;
7004}
7005
7006
7007/**
7008 * Marks the selector descriptor as accessed (only non-system descriptors).
7009 *
7010 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7011 * will therefore skip the limit checks.
7012 *
7013 * @returns Strict VBox status code.
7014 * @param pIemCpu The IEM per CPU data.
7015 * @param uSel The selector.
7016 */
7017static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
7018{
7019 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7020
7021 /*
7022 * Get the selector table base and calculate the entry address.
7023 */
7024 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7025 ? pCtx->ldtr.u64Base
7026 : pCtx->gdtr.pGdt;
7027 GCPtr += uSel & X86_SEL_MASK;
7028
7029 /*
7030 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7031 * ugly stuff to avoid this. This will make sure it's an atomic access
7032 * as well as more or less remove any question about 8-bit or 32-bit accesses.
7033 */
7034 VBOXSTRICTRC rcStrict;
7035 uint32_t volatile *pu32;
7036 if ((GCPtr & 3) == 0)
7037 {
7038 /* The normal case, map the dword containing the accessed bit (40). */
7039 GCPtr += 2 + 2;
7040 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
7041 if (rcStrict != VINF_SUCCESS)
7042 return rcStrict;
7043 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7044 }
7045 else
7046 {
7047 /* The misaligned GDT/LDT case, map the whole thing. */
7048 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
7049 if (rcStrict != VINF_SUCCESS)
7050 return rcStrict;
7051 switch ((uintptr_t)pu32 & 3)
7052 {
7053 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7054 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7055 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7056 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7057 }
7058 }
7059
7060 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
7061}
7062
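/*
 * Illustrative sketch (not part of the original file): the typical pattern when
 * loading a segment register is to set the accessed bit only if it is not set
 * already, mirroring what real CPUs do.  Desc, uSel and rcStrict are assumed to
 * come from a surrounding iemMemFetchSelDesc sequence.
 */
#if 0
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;  /* keep the local copy in sync */
    }
#endif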
7063/** @} */
7064
7065
7066/*
7067 * Include the C/C++ implementation of instructions.
7068 */
7069#include "IEMAllCImpl.cpp.h"
7070
7071
7072
7073/** @name "Microcode" macros.
7074 *
7075 * The idea is that we should be able to use the same code to interpret
7076 * instructions as well as to recompile them. Thus this obfuscation.
7077 *
7078 * @{
7079 */
7080#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
7081#define IEM_MC_END() }
7082#define IEM_MC_PAUSE() do {} while (0)
7083#define IEM_MC_CONTINUE() do {} while (0)
7084
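/*
 * Illustrative sketch (not part of the original file): what a decoder body
 * written with these "microcode" macros looks like, here a rough "add ax, cx"
 * without any ModR/M handling.  The real instruction templates live in
 * IEMAllInstructions.cpp.h; iemAImpl_add_u16 is the assembly worker assumed
 * to exist for this example.
 */
#if 0
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint16_t *, pu16Dst, 0);
    IEM_MC_ARG(uint16_t,   u16Src,  1);
    IEM_MC_ARG(uint32_t *, pEFlags, 2);
    IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
    IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif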
7085/** Internal macro. */
7086#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
7087 do \
7088 { \
7089 VBOXSTRICTRC rcStrict2 = a_Expr; \
7090 if (rcStrict2 != VINF_SUCCESS) \
7091 return rcStrict2; \
7092 } while (0)
7093
7094#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
7095#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
7096#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
7097#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
7098#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
7099#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
7100#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
7101
7102#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
7103#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
7104 do { \
7105 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
7106 return iemRaiseDeviceNotAvailable(pIemCpu); \
7107 } while (0)
7108#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
7109 do { \
7110 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
7111 return iemRaiseMathFault(pIemCpu); \
7112 } while (0)
7113#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
7114 do { \
7115 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7116 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
7117 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
7118 return iemRaiseUndefinedOpcode(pIemCpu); \
7119 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7120 return iemRaiseDeviceNotAvailable(pIemCpu); \
7121 } while (0)
7122#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
7123 do { \
7124 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7125 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
7126 return iemRaiseUndefinedOpcode(pIemCpu); \
7127 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7128 return iemRaiseDeviceNotAvailable(pIemCpu); \
7129 } while (0)
7130#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
7131 do { \
7132 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7133 || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
7134 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
7135 return iemRaiseUndefinedOpcode(pIemCpu); \
7136 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7137 return iemRaiseDeviceNotAvailable(pIemCpu); \
7138 } while (0)
7139#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
7140 do { \
7141 if (pIemCpu->uCpl != 0) \
7142 return iemRaiseGeneralProtectionFault0(pIemCpu); \
7143 } while (0)
7144
7145
7146#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
7147#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
7148#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
7149#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
7150#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
7151#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
7152#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
7153 uint32_t a_Name; \
7154 uint32_t *a_pName = &a_Name
7155#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
7156 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
7157
7158#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
7159#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
7160
7161#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7162#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7163#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7164#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7165#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7166#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7167#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7168#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7169#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7170#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7171#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
7172#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
7173#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
7174#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
7175#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
7176#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
7177#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
7178#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7179#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7180#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7181#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
7182#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
7183#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
7184#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7185#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7186#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7187#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7188#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7189#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7190/** @note Not for IOPL or IF testing or modification. */
7191#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7192#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7193#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
7194#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
7195
7196#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
7197#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
7198#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
7199#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
7200#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
7201#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
7202#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
7203#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
7204#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
7205#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
7206#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
7207 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
7208
7209#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
7210#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
7211/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
7212 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
7213#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
7214#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
7215/** @note Not for IOPL or IF testing or modification. */
7216#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7217
7218#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
7219#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
7220#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
7221 do { \
7222 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7223 *pu32Reg += (a_u32Value); \
7224 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
7225 } while (0)
7226#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
7227
7228#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
7229#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
7230#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
7231 do { \
7232 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7233 *pu32Reg -= (a_u32Value); \
7234 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
7235 } while (0)
7236#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
7237
7238#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
7239#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
7240#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
7241#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
7242#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
7243#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
7244#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
7245
7246#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
7247#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
7248#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
7249#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
7250
7251#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
7252#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
7253#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
7254
7255#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
7256#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
7257
7258#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
7259#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
7260#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
7261
7262#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
7263#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
7264#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
7265
7266#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
7267
7268#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
7269
7270#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
7271#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
7272#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
7273 do { \
7274 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7275 *pu32Reg &= (a_u32Value); \
7276 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
7277 } while (0)
7278#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
7279
7280#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
7281#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
7282#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
7283 do { \
7284 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7285 *pu32Reg |= (a_u32Value); \
7286 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
7287 } while (0)
7288#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
7289
7290
7291/** @note Not for IOPL or IF modification. */
7292#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
7293/** @note Not for IOPL or IF modification. */
7294#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
7295/** @note Not for IOPL or IF modification. */
7296#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
7297
7298#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
7299
7300
7301#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
7302 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
7303#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
7304 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
7305#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
7306 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
7307#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
7308 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
7309#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
7310 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7311#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
7312 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7313#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
7314 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7315
7316#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
7317 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
7318#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
7319 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
7320#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
7321 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
7322#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
7323 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
7324#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
7325 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
7326 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7327 } while (0)
7328#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
7329 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
7330 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7331 } while (0)
7332#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
7333 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7334#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
7335 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7336#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
7337 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
7338
7339#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
7340 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
7341#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
7342 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
7343#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
7344 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
7345
7346#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7347 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
7348#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7349 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7350#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
7351 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
7352
7353#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7354 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
7355#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7356 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7357#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
7358 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
7359
7360#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7361 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7362
7363#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7364 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7365#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7366 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7367#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7368 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7369
7370#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
7371 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
7372#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
7373 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
7374#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
7375 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
7376
7377#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7378 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7379#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
7380 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7381
7382
7383
7384#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7385 do { \
7386 uint8_t u8Tmp; \
7387 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7388 (a_u16Dst) = u8Tmp; \
7389 } while (0)
7390#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7391 do { \
7392 uint8_t u8Tmp; \
7393 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7394 (a_u32Dst) = u8Tmp; \
7395 } while (0)
7396#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7397 do { \
7398 uint8_t u8Tmp; \
7399 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7400 (a_u64Dst) = u8Tmp; \
7401 } while (0)
7402#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7403 do { \
7404 uint16_t u16Tmp; \
7405 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7406 (a_u32Dst) = u16Tmp; \
7407 } while (0)
7408#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7409 do { \
7410 uint16_t u16Tmp; \
7411 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7412 (a_u64Dst) = u16Tmp; \
7413 } while (0)
7414#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7415 do { \
7416 uint32_t u32Tmp; \
7417 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7418 (a_u64Dst) = u32Tmp; \
7419 } while (0)
7420
7421#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7422 do { \
7423 uint8_t u8Tmp; \
7424 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7425 (a_u16Dst) = (int8_t)u8Tmp; \
7426 } while (0)
7427#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7428 do { \
7429 uint8_t u8Tmp; \
7430 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7431 (a_u32Dst) = (int8_t)u8Tmp; \
7432 } while (0)
7433#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7434 do { \
7435 uint8_t u8Tmp; \
7436 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7437 (a_u64Dst) = (int8_t)u8Tmp; \
7438 } while (0)
7439#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7440 do { \
7441 uint16_t u16Tmp; \
7442 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7443 (a_u32Dst) = (int16_t)u16Tmp; \
7444 } while (0)
7445#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7446 do { \
7447 uint16_t u16Tmp; \
7448 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7449 (a_u64Dst) = (int16_t)u16Tmp; \
7450 } while (0)
7451#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7452 do { \
7453 uint32_t u32Tmp; \
7454 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7455 (a_u64Dst) = (int32_t)u32Tmp; \
7456 } while (0)
7457
7458#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
7459 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
7460#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
7461 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
7462#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
7463 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
7464#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
7465 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
7466
7467#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
7468 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
7469#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
7470 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
7471#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
7472 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
7473#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
7474 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
7475
7476#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
7477#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
7478#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
7479#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
7480#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
7481#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
7482#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
7483 do { \
7484 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
7485 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
7486 } while (0)
7487
7488#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
7489 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7490#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
7491 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7492
7493
7494#define IEM_MC_PUSH_U16(a_u16Value) \
7495 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
7496#define IEM_MC_PUSH_U32(a_u32Value) \
7497 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
7498#define IEM_MC_PUSH_U64(a_u64Value) \
7499 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
7500
7501#define IEM_MC_POP_U16(a_pu16Value) \
7502 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
7503#define IEM_MC_POP_U32(a_pu32Value) \
7504 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
7505#define IEM_MC_POP_U64(a_pu64Value) \
7506 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
7507
7508/** Maps guest memory for direct or bounce buffered access.
7509 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7510 * @remarks May return.
7511 */
7512#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
7513 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7514
7515/** Maps guest memory for direct or bounce buffered access.
7516 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7517 * @remarks May return.
7518 */
7519#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
7520 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7521
7522/** Commits the memory and unmaps the guest memory.
7523 * @remarks May return.
7524 */
7525#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
7526 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
7527
7528/** Commits the memory and unmaps the guest memory unless the FPU status word
7529 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
7530 * would cause FLD not to store.
7531 *
7532 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
7533 * store, while \#P will not.
7534 *
7535 * @remarks May in theory return - for now.
7536 */
7537#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
7538 do { \
7539 if ( !(a_u16FSW & X86_FSW_ES) \
7540 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
7541 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
7542 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
7543 } while (0)
7544
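/*
 * Illustrative worked example (not part of the original file) of the test in
 * IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE above: with an unmasked underflow
 * pending the store is skipped, with the underflow masked it goes through.
 * The FSW/FCW values are made up purely for the example.
 */
#if 0
    uint16_t const fFswPending   = X86_FSW_ES | X86_FSW_UE;        /* exception summary + underflow flagged */
    uint16_t const fFcwUnmasked  = X86_FCW_MASK_ALL & ~X86_FCW_UM;  /* everything masked except #U           */
    uint16_t const fFcwAllMasked = X86_FCW_MASK_ALL;                /* everything masked                     */
    /* Skipped: #U is pending and not masked by FCW. */
    bool const fStore1 = !(fFswPending & X86_FSW_ES)
                      || !(  (fFswPending & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE))
                           & ~(fFcwUnmasked & X86_FCW_MASK_ALL));
    /* Performed: the pending #U is masked in FCW, so the condition lets the store through. */
    bool const fStore2 = !(fFswPending & X86_FSW_ES)
                      || !(  (fFswPending & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE))
                           & ~(fFcwAllMasked & X86_FCW_MASK_ALL));
    Assert(!fStore1 && fStore2);
#endif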
7545/** Calculate the effective address from R/M. */
7546#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
7547 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
7548
7549#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
7550#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
7551#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
7552#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
7553#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
7554#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
7555#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
7556
7557/**
7558 * Defers the rest of the instruction emulation to a C implementation routine
7559 * and returns, only taking the standard parameters.
7560 *
7561 * @param a_pfnCImpl The pointer to the C routine.
7562 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7563 */
7564#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7565
7566/**
7567 * Defers the rest of instruction emulation to a C implementation routine and
7568 * returns, taking one argument in addition to the standard ones.
7569 *
7570 * @param a_pfnCImpl The pointer to the C routine.
7571 * @param a0 The argument.
7572 */
7573#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7574
7575/**
7576 * Defers the rest of the instruction emulation to a C implementation routine
7577 * and returns, taking two arguments in addition to the standard ones.
7578 *
7579 * @param a_pfnCImpl The pointer to the C routine.
7580 * @param a0 The first extra argument.
7581 * @param a1 The second extra argument.
7582 */
7583#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7584
7585/**
7586 * Defers the rest of the instruction emulation to a C implementation routine
7587 * and returns, taking three arguments in addition to the standard ones.
7588 *
7589 * @param a_pfnCImpl The pointer to the C routine.
7590 * @param a0 The first extra argument.
7591 * @param a1 The second extra argument.
7592 * @param a2 The third extra argument.
7593 */
7594#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7595
7596/**
7597 * Defers the rest of the instruction emulation to a C implementation routine
7598 * and returns, taking four arguments in addition to the standard ones.
7599 *
7600 * @param a_pfnCImpl The pointer to the C routine.
7601 * @param a0 The first extra argument.
7602 * @param a1 The second extra argument.
7603 * @param a2 The third extra argument.
7604 * @param a3 The fourth extra argument.
7605 */
7606#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
7607
7608/**
7609 * Defers the rest of the instruction emulation to a C implementation routine
7610 * and returns, taking five arguments in addition to the standard ones.
7611 *
7612 * @param a_pfnCImpl The pointer to the C routine.
7613 * @param a0 The first extra argument.
7614 * @param a1 The second extra argument.
7615 * @param a2 The third extra argument.
7616 * @param a3 The fourth extra argument.
7617 * @param a4 The fifth extra argument.
7618 */
7619#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
7620
7621/**
7622 * Defers the entire instruction emulation to a C implementation routine and
7623 * returns, only taking the standard parameters.
7624 *
7625 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7626 *
7627 * @param a_pfnCImpl The pointer to the C routine.
7628 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7629 */
7630#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7631
7632/**
7633 * Defers the entire instruction emulation to a C implementation routine and
7634 * returns, taking one argument in addition to the standard ones.
7635 *
7636 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7637 *
7638 * @param a_pfnCImpl The pointer to the C routine.
7639 * @param a0 The argument.
7640 */
7641#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7642
7643/**
7644 * Defers the entire instruction emulation to a C implementation routine and
7645 * returns, taking two arguments in addition to the standard ones.
7646 *
7647 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7648 *
7649 * @param a_pfnCImpl The pointer to the C routine.
7650 * @param a0 The first extra argument.
7651 * @param a1 The second extra argument.
7652 */
7653#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7654
7655/**
7656 * Defers the entire instruction emulation to a C implementation routine and
7657 * returns, taking three arguments in addition to the standard ones.
7658 *
7659 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7660 *
7661 * @param a_pfnCImpl The pointer to the C routine.
7662 * @param a0 The first extra argument.
7663 * @param a1 The second extra argument.
7664 * @param a2 The third extra argument.
7665 */
7666#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7667
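/*
 * Illustrative sketch (not part of the original file): the smallest possible
 * decoder body, deferring everything to a C implementation routine.  The
 * function name iemOp_example_hlt is made up; iemCImpl_hlt stands for the
 * kind of worker that lives in IEMAllCImpl.cpp.h.
 */
#if 0
FNIEMOP_DEF(iemOp_example_hlt)
{
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
#endif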
7668/**
7669 * Calls a FPU assembly implementation taking one visible argument.
7670 *
7671 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7672 * @param a0 The first extra argument.
7673 */
7674#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
7675 do { \
7676 iemFpuPrepareUsage(pIemCpu); \
7677 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
7678 } while (0)
7679
7680/**
7681 * Calls a FPU assembly implementation taking two visible arguments.
7682 *
7683 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7684 * @param a0 The first extra argument.
7685 * @param a1 The second extra argument.
7686 */
7687#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
7688 do { \
7689 iemFpuPrepareUsage(pIemCpu); \
7690 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7691 } while (0)
7692
7693/**
7694 * Calls a FPU assembly implementation taking three visible arguments.
7695 *
7696 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7697 * @param a0 The first extra argument.
7698 * @param a1 The second extra argument.
7699 * @param a2 The third extra argument.
7700 */
7701#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7702 do { \
7703 iemFpuPrepareUsage(pIemCpu); \
7704 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7705 } while (0)
7706
7707#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
7708 do { \
7709 (a_FpuData).FSW = (a_FSW); \
7710 (a_FpuData).r80Result = *(a_pr80Value); \
7711 } while (0)
7712
7713/** Pushes FPU result onto the stack. */
7714#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
7715 iemFpuPushResult(pIemCpu, &a_FpuData)
7716/** Pushes FPU result onto the stack and sets the FPUDP. */
7717#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
7718 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
7719
7720/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
7721#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
7722 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
7723
7724/** Stores FPU result in a stack register. */
7725#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
7726 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
7727/** Stores FPU result in a stack register and pops the stack. */
7728#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
7729 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
7730/** Stores FPU result in a stack register and sets the FPUDP. */
7731#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7732 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7733/** Stores FPU result in a stack register, sets the FPUDP, and pops the
7734 * stack. */
7735#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7736 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7737
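/*
 * Illustrative sketch (not part of the original file): roughly how a memory
 * operand FPU load ties the pieces above together.  FPU stack overflow
 * checking and decoding details are omitted, bRm is the ModR/M byte the real
 * decoder would receive, and iemAImpl_fld_r32_to_r80 is the assembly worker
 * assumed for this example.
 */
#if 0
    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U,            r32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val, r32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
    IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif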
7738/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
7739#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
7740 iemFpuUpdateOpcodeAndIp(pIemCpu)
7741/** Free a stack register (for FFREE and FFREEP). */
7742#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
7743 iemFpuStackFree(pIemCpu, a_iStReg)
7744/** Increment the FPU stack pointer. */
7745#define IEM_MC_FPU_STACK_INC_TOP() \
7746 iemFpuStackIncTop(pIemCpu)
7747/** Decrement the FPU stack pointer. */
7748#define IEM_MC_FPU_STACK_DEC_TOP() \
7749 iemFpuStackDecTop(pIemCpu)
7750
7751/** Updates the FSW, FOP, FPUIP, and FPUCS. */
7752#define IEM_MC_UPDATE_FSW(a_u16FSW) \
7753 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7754/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
7755#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
7756 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7757/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
7758#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7759 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7760/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
7761#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
7762 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
7763/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
7764 * stack. */
7765#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7766 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7767/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
7768#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
7769 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
7770
7771/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
7772#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
7773 iemFpuStackUnderflow(pIemCpu, a_iStDst)
7774/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7775 * stack. */
7776#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
7777 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
7778/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7779 * FPUDS. */
7780#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7781 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7782/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7783 * FPUDS. Pops stack. */
7784#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7785 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7786/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7787 * stack twice. */
7788#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
7789 iemFpuStackUnderflowThenPopPop(pIemCpu)
7790/** Raises a FPU stack underflow exception for an instruction pushing a result
7791 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
7792#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
7793 iemFpuStackPushUnderflow(pIemCpu)
7794/** Raises a FPU stack underflow exception for an instruction pushing a result
7795 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
7796#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
7797 iemFpuStackPushUnderflowTwo(pIemCpu)
7798
7799/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7800 * FPUIP, FPUCS and FOP. */
7801#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
7802 iemFpuStackPushOverflow(pIemCpu)
7803/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7804 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
7805#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
7806 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
7807/** Indicates that we (might) have modified the FPU state. */
7808#define IEM_MC_USED_FPU() \
7809 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
7810
7811/**
7812 * Calls a MMX assembly implementation taking two visible arguments.
7813 *
7814 * @param a_pfnAImpl Pointer to the assembly MMX routine.
7815 * @param a0 The first extra argument.
7816 * @param a1 The second extra argument.
7817 */
7818#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
7819 do { \
7820 iemFpuPrepareUsage(pIemCpu); \
7821 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7822 } while (0)
7823
7824/**
7825 * Calls a MMX assembly implementation taking three visible arguments.
7826 *
7827 * @param a_pfnAImpl Pointer to the assembly MMX routine.
7828 * @param a0 The first extra argument.
7829 * @param a1 The second extra argument.
7830 * @param a2 The third extra argument.
7831 */
7832#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7833 do { \
7834 iemFpuPrepareUsage(pIemCpu); \
7835 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7836 } while (0)
7837
7838
7839/**
7840 * Calls a SSE assembly implementation taking two visible arguments.
7841 *
7842 * @param a_pfnAImpl Pointer to the assembly SSE routine.
7843 * @param a0 The first extra argument.
7844 * @param a1 The second extra argument.
7845 */
7846#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
7847 do { \
7848 iemFpuPrepareUsageSse(pIemCpu); \
7849 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7850 } while (0)
7851
7852/**
7853 * Calls a SSE assembly implementation taking three visible arguments.
7854 *
7855 * @param a_pfnAImpl Pointer to the assembly SSE routine.
7856 * @param a0 The first extra argument.
7857 * @param a1 The second extra argument.
7858 * @param a2 The third extra argument.
7859 */
7860#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7861 do { \
7862 iemFpuPrepareUsageSse(pIemCpu); \
7863 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7864 } while (0)
7865
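/*
 * Usage sketch for IEM_MC_CALL_MMX_AIMPL_2/3 and the SSE variants above (illustrative
 * only; the surrounding IEM_MC_* helpers and the iemAImpl_pxor_u64 worker named here
 * are assumptions based on the instruction decoder templates and may differ):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint64_t *,       pDst, 0);
 *      IEM_MC_ARG(uint64_t const *, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
 *      IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
 *      IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
 *      IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pxor_u64, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * The important bit is that the CALL macros invoke iemFpuPrepareUsage /
 * iemFpuPrepareUsageSse first, so the guest FPU/SSE state is ready before the
 * assembly worker is handed a pointer to it.
 */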
7866
7867/** @note Not for IOPL or IF testing. */
7868#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
7869/** @note Not for IOPL or IF testing. */
7870#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
7871/** @note Not for IOPL or IF testing. */
7872#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
7873/** @note Not for IOPL or IF testing. */
7874#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
7875/** @note Not for IOPL or IF testing. */
7876#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
7877 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7878 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7879/** @note Not for IOPL or IF testing. */
7880#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7881 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7882 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7883/** @note Not for IOPL or IF testing. */
7884#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7885 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7886 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7887 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7888/** @note Not for IOPL or IF testing. */
7889#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7890 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7891 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7892 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7893#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7894#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7895#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7896/** @note Not for IOPL or IF testing. */
7897#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7898 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7899 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7900/** @note Not for IOPL or IF testing. */
7901#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7902 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7903 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7904/** @note Not for IOPL or IF testing. */
7905#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7906 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7907 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7908/** @note Not for IOPL or IF testing. */
7909#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7910 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7911 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7912/** @note Not for IOPL or IF testing. */
7913#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7914 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7915 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7916/** @note Not for IOPL or IF testing. */
7917#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7918 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7919 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7920#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7921#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7922#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7923 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7924#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7925 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7926#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7927 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7928#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7929 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7930#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7931 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7932#define IEM_MC_IF_FCW_IM() \
7933 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7934
7935#define IEM_MC_ELSE() } else {
7936#define IEM_MC_ENDIF() } do {} while (0)
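/*
 * Usage sketch for the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros: each IF macro
 * opens a brace that must be closed by IEM_MC_ENDIF, optionally with an IEM_MC_ELSE
 * branch in between.  A JZ rel8 style body could look roughly like this (helper names
 * such as IEM_MC_REL_JMP_S8 are assumptions):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 */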
7937
7938/** @} */
7939
7940
7941/** @name Opcode Debug Helpers.
7942 * @{
7943 */
7944#ifdef DEBUG
7945# define IEMOP_MNEMONIC(a_szMnemonic) \
7946 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7947 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7948# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7949 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7950 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7951#else
7952# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7953# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7954#endif
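/*
 * Example (illustrative): an opcode function typically starts with something like
 * IEMOP_MNEMONIC("add Eb,Gb"), which in DEBUG builds emits a Log4 line along the
 * lines of
 *      decode - 0008:00000000c0101234 add Eb,Gb [#42]
 * (CS selector, RIP, optional "lock " prefix, mnemonic and the running instruction
 * count).  In non-DEBUG builds both macros compile to nothing.
 */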
7955
7956/** @} */
7957
7958
7959/** @name Opcode Helpers.
7960 * @{
7961 */
7962
7963/** The instruction raises an \#UD in real and V8086 mode. */
7964#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7965 do \
7966 { \
7967 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7968 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7969 } while (0)
7970
7971/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
7972 * lock prefixed.
7973 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7974#define IEMOP_HLP_NO_LOCK_PREFIX() \
7975 do \
7976 { \
7977 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7978 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7979 } while (0)
7980
7981/** The instruction is not available in 64-bit mode; raises \#UD if we're in
7982 * 64-bit mode. */
7983#define IEMOP_HLP_NO_64BIT() \
7984 do \
7985 { \
7986 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7987 return IEMOP_RAISE_INVALID_OPCODE(); \
7988 } while (0)
7989
7990/** The instruction is only available in 64-bit mode; raises \#UD if we're not in
7991 * 64-bit mode. */
7992#define IEMOP_HLP_ONLY_64BIT() \
7993 do \
7994 { \
7995 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
7996 return IEMOP_RAISE_INVALID_OPCODE(); \
7997 } while (0)
7998
7999/** The instruction defaults to 64-bit operand size if 64-bit mode. */
8000#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
8001 do \
8002 { \
8003 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
8004 iemRecalEffOpSize64Default(pIemCpu); \
8005 } while (0)
8006
8007/** The instruction has 64-bit operand size if 64-bit mode. */
8008#define IEMOP_HLP_64BIT_OP_SIZE() \
8009 do \
8010 { \
8011 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
8012 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
8013 } while (0)
8014
8015/** Only a REX prefix immediately preceding the first opcode byte takes
8016 * effect. This macro helps ensure this as well as log bad guest code. */
8017#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
8018 do \
8019 { \
8020 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
8021 { \
8022 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
8023 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
8024 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
8025 pIemCpu->uRexB = 0; \
8026 pIemCpu->uRexIndex = 0; \
8027 pIemCpu->uRexReg = 0; \
8028 iemRecalEffOpSize(pIemCpu); \
8029 } \
8030 } while (0)
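/*
 * Illustration of the rule above (assuming 64-bit mode): in the byte sequence
 * 48 26 8b 00 the REX.W prefix (48) is followed by a segment prefix (26) rather than
 * the opcode, so it must not take effect and the load stays 32-bit (mov eax,[rax]
 * instead of mov rax,[rax]).  A prefix decoder would invoke this macro when it sees
 * the second prefix, dropping the recorded REX bits and recalculating the effective
 * operand size.
 */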
8031
8032/**
8033 * Done decoding.
8034 */
8035#define IEMOP_HLP_DONE_DECODING() \
8036 do \
8037 { \
8038 /*nothing for now, maybe later... */ \
8039 } while (0)
8040
8041/**
8042 * Done decoding, raise \#UD exception if lock prefix present.
8043 */
8044#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
8045 do \
8046 { \
8047 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8048 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8049 } while (0)
8050#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
8051 do \
8052 { \
8053 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8054 { \
8055 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
8056 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8057 } \
8058 } while (0)
8059#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
8060 do \
8061 { \
8062 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8063 { \
8064 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
8065 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8066 } \
8067 } while (0)
8068
8069
8070/**
8071 * Calculates the effective address of a ModR/M memory operand.
8072 *
8073 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8074 *
8075 * @return Strict VBox status code.
8076 * @param pIemCpu The IEM per CPU data.
8077 * @param bRm The ModRM byte.
8078 * @param cbImm The size of any immediate following the
8079 * effective address opcode bytes. Important for
8080 * RIP relative addressing.
8081 * @param pGCPtrEff Where to return the effective address.
8082 */
8083static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
8084{
8085 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8086 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8087#define SET_SS_DEF() \
8088 do \
8089 { \
8090 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8091 pIemCpu->iEffSeg = X86_SREG_SS; \
8092 } while (0)
8093
8094 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
8095 {
8096/** @todo Check the effective address size crap! */
8097 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
8098 {
8099 uint16_t u16EffAddr;
8100
8101 /* Handle the disp16 form with no registers first. */
8102 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8103 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8104 else
8105 {
8106 /* Get the displacement. */
8107 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8108 {
8109 case 0: u16EffAddr = 0; break;
8110 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8111 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8112 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
8113 }
8114
8115 /* Add the base and index registers to the disp. */
8116 switch (bRm & X86_MODRM_RM_MASK)
8117 {
8118 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
8119 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
8120 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
8121 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
8122 case 4: u16EffAddr += pCtx->si; break;
8123 case 5: u16EffAddr += pCtx->di; break;
8124 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
8125 case 7: u16EffAddr += pCtx->bx; break;
8126 }
8127 }
8128
8129 *pGCPtrEff = u16EffAddr;
8130 }
8131 else
8132 {
8133 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8134 uint32_t u32EffAddr;
8135
8136 /* Handle the disp32 form with no registers first. */
8137 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8138 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8139 else
8140 {
8141 /* Get the register (or SIB) value. */
8142 switch ((bRm & X86_MODRM_RM_MASK))
8143 {
8144 case 0: u32EffAddr = pCtx->eax; break;
8145 case 1: u32EffAddr = pCtx->ecx; break;
8146 case 2: u32EffAddr = pCtx->edx; break;
8147 case 3: u32EffAddr = pCtx->ebx; break;
8148 case 4: /* SIB */
8149 {
8150 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8151
8152 /* Get the index and scale it. */
8153 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8154 {
8155 case 0: u32EffAddr = pCtx->eax; break;
8156 case 1: u32EffAddr = pCtx->ecx; break;
8157 case 2: u32EffAddr = pCtx->edx; break;
8158 case 3: u32EffAddr = pCtx->ebx; break;
8159 case 4: u32EffAddr = 0; /*none */ break;
8160 case 5: u32EffAddr = pCtx->ebp; break;
8161 case 6: u32EffAddr = pCtx->esi; break;
8162 case 7: u32EffAddr = pCtx->edi; break;
8163 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8164 }
8165 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8166
8167 /* add base */
8168 switch (bSib & X86_SIB_BASE_MASK)
8169 {
8170 case 0: u32EffAddr += pCtx->eax; break;
8171 case 1: u32EffAddr += pCtx->ecx; break;
8172 case 2: u32EffAddr += pCtx->edx; break;
8173 case 3: u32EffAddr += pCtx->ebx; break;
8174 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
8175 case 5:
8176 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8177 {
8178 u32EffAddr += pCtx->ebp;
8179 SET_SS_DEF();
8180 }
8181 else
8182 {
8183 uint32_t u32Disp;
8184 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8185 u32EffAddr += u32Disp;
8186 }
8187 break;
8188 case 6: u32EffAddr += pCtx->esi; break;
8189 case 7: u32EffAddr += pCtx->edi; break;
8190 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8191 }
8192 break;
8193 }
8194 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
8195 case 6: u32EffAddr = pCtx->esi; break;
8196 case 7: u32EffAddr = pCtx->edi; break;
8197 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8198 }
8199
8200 /* Get and add the displacement. */
8201 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8202 {
8203 case 0:
8204 break;
8205 case 1:
8206 {
8207 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8208 u32EffAddr += i8Disp;
8209 break;
8210 }
8211 case 2:
8212 {
8213 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8214 u32EffAddr += u32Disp;
8215 break;
8216 }
8217 default:
8218 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
8219 }
8220
8221 }
8222 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
8223 *pGCPtrEff = u32EffAddr;
8224 else
8225 {
8226 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
8227 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8228 }
8229 }
8230 }
8231 else
8232 {
8233 uint64_t u64EffAddr;
8234
8235 /* Handle the rip+disp32 form with no registers first. */
8236 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8237 {
8238 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8239 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
8240 }
8241 else
8242 {
8243 /* Get the register (or SIB) value. */
8244 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
8245 {
8246 case 0: u64EffAddr = pCtx->rax; break;
8247 case 1: u64EffAddr = pCtx->rcx; break;
8248 case 2: u64EffAddr = pCtx->rdx; break;
8249 case 3: u64EffAddr = pCtx->rbx; break;
8250 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
8251 case 6: u64EffAddr = pCtx->rsi; break;
8252 case 7: u64EffAddr = pCtx->rdi; break;
8253 case 8: u64EffAddr = pCtx->r8; break;
8254 case 9: u64EffAddr = pCtx->r9; break;
8255 case 10: u64EffAddr = pCtx->r10; break;
8256 case 11: u64EffAddr = pCtx->r11; break;
8257 case 13: u64EffAddr = pCtx->r13; break;
8258 case 14: u64EffAddr = pCtx->r14; break;
8259 case 15: u64EffAddr = pCtx->r15; break;
8260 /* SIB */
8261 case 4:
8262 case 12:
8263 {
8264 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8265
8266 /* Get the index and scale it. */
8267 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
8268 {
8269 case 0: u64EffAddr = pCtx->rax; break;
8270 case 1: u64EffAddr = pCtx->rcx; break;
8271 case 2: u64EffAddr = pCtx->rdx; break;
8272 case 3: u64EffAddr = pCtx->rbx; break;
8273 case 4: u64EffAddr = 0; /*none */ break;
8274 case 5: u64EffAddr = pCtx->rbp; break;
8275 case 6: u64EffAddr = pCtx->rsi; break;
8276 case 7: u64EffAddr = pCtx->rdi; break;
8277 case 8: u64EffAddr = pCtx->r8; break;
8278 case 9: u64EffAddr = pCtx->r9; break;
8279 case 10: u64EffAddr = pCtx->r10; break;
8280 case 11: u64EffAddr = pCtx->r11; break;
8281 case 12: u64EffAddr = pCtx->r12; break;
8282 case 13: u64EffAddr = pCtx->r13; break;
8283 case 14: u64EffAddr = pCtx->r14; break;
8284 case 15: u64EffAddr = pCtx->r15; break;
8285 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8286 }
8287 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8288
8289 /* add base */
8290 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
8291 {
8292 case 0: u64EffAddr += pCtx->rax; break;
8293 case 1: u64EffAddr += pCtx->rcx; break;
8294 case 2: u64EffAddr += pCtx->rdx; break;
8295 case 3: u64EffAddr += pCtx->rbx; break;
8296 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
8297 case 6: u64EffAddr += pCtx->rsi; break;
8298 case 7: u64EffAddr += pCtx->rdi; break;
8299 case 8: u64EffAddr += pCtx->r8; break;
8300 case 9: u64EffAddr += pCtx->r9; break;
8301 case 10: u64EffAddr += pCtx->r10; break;
8302 case 11: u64EffAddr += pCtx->r11; break;
8303 case 12: u64EffAddr += pCtx->r12; break;
8304 case 14: u64EffAddr += pCtx->r14; break;
8305 case 15: u64EffAddr += pCtx->r15; break;
8306 /* complicated encodings */
8307 case 5:
8308 case 13:
8309 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8310 {
8311 if (!pIemCpu->uRexB)
8312 {
8313 u64EffAddr += pCtx->rbp;
8314 SET_SS_DEF();
8315 }
8316 else
8317 u64EffAddr += pCtx->r13;
8318 }
8319 else
8320 {
8321 uint32_t u32Disp;
8322 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8323 u64EffAddr += (int32_t)u32Disp;
8324 }
8325 break;
8326 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8327 }
8328 break;
8329 }
8330 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8331 }
8332
8333 /* Get and add the displacement. */
8334 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8335 {
8336 case 0:
8337 break;
8338 case 1:
8339 {
8340 int8_t i8Disp;
8341 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8342 u64EffAddr += i8Disp;
8343 break;
8344 }
8345 case 2:
8346 {
8347 uint32_t u32Disp;
8348 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8349 u64EffAddr += (int32_t)u32Disp;
8350 break;
8351 }
8352 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8353 }
8354
8355 }
8356
8357 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
8358 *pGCPtrEff = u64EffAddr;
8359 else
8360 {
8361 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8362 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8363 }
8364 }
8365
8366 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8367 return VINF_SUCCESS;
8368}
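/*
 * Worked example for iemOpHlpCalcRmEffAddr (32-bit addressing, illustrative):
 * bRm=0x84 gives mod=10b and rm=100b, so a SIB byte follows; with SIB=0x88
 * (scale=4, index=ecx, base=eax) and the trailing disp32 the function returns
 *      *pGCPtrEff = eax + ecx*4 + disp32
 * and leaves the default segment at DS, since SET_SS_DEF() only fires for the
 * ebp/esp (and 16-bit bp) based forms.
 */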
8369
8370/** @} */
8371
8372
8373
8374/*
8375 * Include the instructions
8376 */
8377#include "IEMAllInstructions.cpp.h"
8378
8379
8380
8381
8382#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8383
8384/**
8385 * Sets up execution verification mode.
8386 */
8387static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
8388{
8389 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8390 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
8391
8392 /*
8393 * Always note down the address of the current instruction.
8394 */
8395 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
8396 pIemCpu->uOldRip = pOrgCtx->rip;
8397
8398 /*
8399 * Enable verification and/or logging.
8400 */
8401 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */;
8402 if ( fNewNoRem
8403 && ( 0
8404#if 0 /* auto enable on first paged protected mode interrupt */
8405 || ( pOrgCtx->eflags.Bits.u1IF
8406 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
8407 && TRPMHasTrap(pVCpu)
8408 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
8409#endif
8410#if 0
8411 || ( pOrgCtx->cs.Sel == 0x10
8412 && ( pOrgCtx->rip == 0x90119e3e
8413 || pOrgCtx->rip == 0x901d9810) )
8414#endif
8415#if 0 /* Auto enable DSL - FPU stuff. */
8416 || ( pOrgCtx->cs == 0x10
8417 && (// pOrgCtx->rip == 0xc02ec07f
8418 //|| pOrgCtx->rip == 0xc02ec082
8419 //|| pOrgCtx->rip == 0xc02ec0c9
8420 0
8421 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
8422#endif
8423#if 0 /* Auto enable DSL - fstp st0 stuff. */
8424 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
8425#endif
8426#if 0
8427 || pOrgCtx->rip == 0x9022bb3a
8428#endif
8429#if 0
8430 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
8431#endif
8432#if 0
8433 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
8434 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
8435#endif
8436#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
8437 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
8438 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
8439 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
8440#endif
8441#if 0 /* NT4SP1 - xadd early boot. */
8442 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
8443#endif
8444#if 0 /* NT4SP1 - wrmsr (intel MSR). */
8445 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
8446#endif
8447#if 0 /* NT4SP1 - cmpxchg (AMD). */
8448 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
8449#endif
8450#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
8451 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
8452#endif
8453#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
8454 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
8455
8456#endif
8457#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
8458 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
8459
8460#endif
8461#if 0 /* NT4SP1 - frstor [ecx] */
8462 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
8463#endif
8464#if 0 /* xxxxxx - All long mode code. */
8465 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
8466#endif
8467#if 0 /* rep movsq linux 3.7 64-bit boot. */
8468 || (pOrgCtx->rip == 0x0000000000100241)
8469#endif
8470#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
8471 || (pOrgCtx->rip == 0x000000000215e240)
8472#endif
8473#if 0 /* DOS's size-overridden iret to v8086. */
8474 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
8475#endif
8476#if 1 /* Win3.1: port 64 interception in v8086 mode */
8477 || (pOrgCtx->rip == 0xe9d6 && pOrgCtx->cs.Sel == 0xf000 && pOrgCtx->eflags.Bits.u1VM
8478 && pOrgCtx->tr.u64Base == 0x80049e8c && pOrgCtx->tr.u32Limit == 0x2069)
8479#endif
8480 )
8481 )
8482 {
8483 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
8484 RTLogFlags(NULL, "enabled");
8485 fNewNoRem = false;
8486 }
8487 if (fNewNoRem != pIemCpu->fNoRem)
8488 {
8489 pIemCpu->fNoRem = fNewNoRem;
8490 if (!fNewNoRem)
8491 {
8492 LogAlways(("Enabling verification mode!\n"));
8493 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
8494 }
8495 else
8496 LogAlways(("Disabling verification mode!\n"));
8497 }
8498
8499 /*
8500 * Switch state.
8501 */
8502 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8503 {
8504 static CPUMCTX s_DebugCtx; /* Ugly! */
8505
8506 s_DebugCtx = *pOrgCtx;
8507 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
8508 }
8509
8510 /*
8511 * See if there is an interrupt pending in TRPM and inject it if we can.
8512 */
8513 pIemCpu->uInjectCpl = UINT8_MAX;
8514 if ( pOrgCtx->eflags.Bits.u1IF
8515 && TRPMHasTrap(pVCpu)
8516 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
8517 {
8518 uint8_t u8TrapNo;
8519 TRPMEVENT enmType;
8520 RTGCUINT uErrCode;
8521 RTGCPTR uCr2;
8522 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
8523 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
8524 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8525 TRPMResetTrap(pVCpu);
8526 pIemCpu->uInjectCpl = pIemCpu->uCpl;
8527 }
8528
8529 /*
8530 * Reset the counters.
8531 */
8532 pIemCpu->cIOReads = 0;
8533 pIemCpu->cIOWrites = 0;
8534 pIemCpu->fIgnoreRaxRdx = false;
8535 pIemCpu->fOverlappingMovs = false;
8536 pIemCpu->fUndefinedEFlags = 0;
8537
8538 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8539 {
8540 /*
8541 * Free all verification records.
8542 */
8543 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
8544 pIemCpu->pIemEvtRecHead = NULL;
8545 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
8546 do
8547 {
8548 while (pEvtRec)
8549 {
8550 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
8551 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
8552 pIemCpu->pFreeEvtRec = pEvtRec;
8553 pEvtRec = pNext;
8554 }
8555 pEvtRec = pIemCpu->pOtherEvtRecHead;
8556 pIemCpu->pOtherEvtRecHead = NULL;
8557 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
8558 } while (pEvtRec);
8559 }
8560}
8561
8562
8563/**
8564 * Allocate an event record.
8565 * @returns Pointer to a record.
8566 */
8567static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
8568{
8569 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8570 return NULL;
8571
8572 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
8573 if (pEvtRec)
8574 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
8575 else
8576 {
8577 if (!pIemCpu->ppIemEvtRecNext)
8578 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
8579
8580 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
8581 if (!pEvtRec)
8582 return NULL;
8583 }
8584 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
8585 pEvtRec->pNext = NULL;
8586 return pEvtRec;
8587}
8588
8589
8590/**
8591 * IOMMMIORead notification.
8592 */
8593VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
8594{
8595 PVMCPU pVCpu = VMMGetCpu(pVM);
8596 if (!pVCpu)
8597 return;
8598 PIEMCPU pIemCpu = &pVCpu->iem.s;
8599 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8600 if (!pEvtRec)
8601 return;
8602 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8603 pEvtRec->u.RamRead.GCPhys = GCPhys;
8604 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
8605 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8606 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8607}
8608
8609
8610/**
8611 * IOMMMIOWrite notification.
8612 */
8613VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
8614{
8615 PVMCPU pVCpu = VMMGetCpu(pVM);
8616 if (!pVCpu)
8617 return;
8618 PIEMCPU pIemCpu = &pVCpu->iem.s;
8619 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8620 if (!pEvtRec)
8621 return;
8622 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8623 pEvtRec->u.RamWrite.GCPhys = GCPhys;
8624 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
8625 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
8626 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
8627 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
8628 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
8629 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8630 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8631}
8632
8633
8634/**
8635 * IOMIOPortRead notification.
8636 */
8637VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
8638{
8639 PVMCPU pVCpu = VMMGetCpu(pVM);
8640 if (!pVCpu)
8641 return;
8642 PIEMCPU pIemCpu = &pVCpu->iem.s;
8643 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8644 if (!pEvtRec)
8645 return;
8646 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8647 pEvtRec->u.IOPortRead.Port = Port;
8648 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8649 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8650 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8651}
8652
8653/**
8654 * IOMIOPortWrite notification.
8655 */
8656VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8657{
8658 PVMCPU pVCpu = VMMGetCpu(pVM);
8659 if (!pVCpu)
8660 return;
8661 PIEMCPU pIemCpu = &pVCpu->iem.s;
8662 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8663 if (!pEvtRec)
8664 return;
8665 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8666 pEvtRec->u.IOPortWrite.Port = Port;
8667 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8668 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8669 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8670 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8671}
8672
8673
8674VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
8675{
8676 AssertFailed();
8677}
8678
8679
8680VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
8681{
8682 AssertFailed();
8683}
8684
8685
8686/**
8687 * Fakes and records an I/O port read.
8688 *
8689 * @returns VINF_SUCCESS.
8690 * @param pIemCpu The IEM per CPU data.
8691 * @param Port The I/O port.
8692 * @param pu32Value Where to store the fake value.
8693 * @param cbValue The size of the access.
8694 */
8695static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8696{
8697 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8698 if (pEvtRec)
8699 {
8700 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8701 pEvtRec->u.IOPortRead.Port = Port;
8702 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8703 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8704 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8705 }
8706 pIemCpu->cIOReads++;
8707 *pu32Value = 0xcccccccc;
8708 return VINF_SUCCESS;
8709}
8710
8711
8712/**
8713 * Fakes and records an I/O port write.
8714 *
8715 * @returns VINF_SUCCESS.
8716 * @param pIemCpu The IEM per CPU data.
8717 * @param Port The I/O port.
8718 * @param u32Value The value being written.
8719 * @param cbValue The size of the access.
8720 */
8721static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8722{
8723 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8724 if (pEvtRec)
8725 {
8726 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8727 pEvtRec->u.IOPortWrite.Port = Port;
8728 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8729 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8730 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8731 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8732 }
8733 pIemCpu->cIOWrites++;
8734 return VINF_SUCCESS;
8735}
8736
8737
8738/**
8739 * Used to add extra details about a stub case.
8740 * @param pIemCpu The IEM per CPU state.
8741 */
8742static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
8743{
8744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8745 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8746 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8747 char szRegs[4096];
8748 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
8749 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
8750 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
8751 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
8752 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
8753 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
8754 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
8755 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
8756 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
8757 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
8758 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
8759 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
8760 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
8761 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
8762 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
8763 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
8764 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
8765 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
8766 " efer=%016VR{efer}\n"
8767 " pat=%016VR{pat}\n"
8768 " sf_mask=%016VR{sf_mask}\n"
8769 "krnl_gs_base=%016VR{krnl_gs_base}\n"
8770 " lstar=%016VR{lstar}\n"
8771 " star=%016VR{star} cstar=%016VR{cstar}\n"
8772 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
8773 );
8774
8775 char szInstr1[256];
8776 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
8777 DBGF_DISAS_FLAGS_DEFAULT_MODE,
8778 szInstr1, sizeof(szInstr1), NULL);
8779 char szInstr2[256];
8780 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
8781 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8782 szInstr2, sizeof(szInstr2), NULL);
8783
8784 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
8785}
8786
8787
8788/**
8789 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
8790 * dump to the assertion info.
8791 *
8792 * @param pEvtRec The record to dump.
8793 */
8794static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
8795{
8796 switch (pEvtRec->enmEvent)
8797 {
8798 case IEMVERIFYEVENT_IOPORT_READ:
8799 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
8800 pEvtRec->u.IOPortWrite.Port,
8801 pEvtRec->u.IOPortWrite.cbValue);
8802 break;
8803 case IEMVERIFYEVENT_IOPORT_WRITE:
8804 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
8805 pEvtRec->u.IOPortWrite.Port,
8806 pEvtRec->u.IOPortWrite.cbValue,
8807 pEvtRec->u.IOPortWrite.u32Value);
8808 break;
8809 case IEMVERIFYEVENT_RAM_READ:
8810 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
8811 pEvtRec->u.RamRead.GCPhys,
8812 pEvtRec->u.RamRead.cb);
8813 break;
8814 case IEMVERIFYEVENT_RAM_WRITE:
8815 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
8816 pEvtRec->u.RamWrite.GCPhys,
8817 pEvtRec->u.RamWrite.cb,
8818 (int)pEvtRec->u.RamWrite.cb,
8819 pEvtRec->u.RamWrite.ab);
8820 break;
8821 default:
8822 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
8823 break;
8824 }
8825}
8826
8827
8828/**
8829 * Raises an assertion on the specified records, showing the given message with
8830 * dumps of both records attached.
8831 *
8832 * @param pIemCpu The IEM per CPU data.
8833 * @param pEvtRec1 The first record.
8834 * @param pEvtRec2 The second record.
8835 * @param pszMsg The message explaining why we're asserting.
8836 */
8837static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
8838{
8839 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8840 iemVerifyAssertAddRecordDump(pEvtRec1);
8841 iemVerifyAssertAddRecordDump(pEvtRec2);
8842 iemVerifyAssertMsg2(pIemCpu);
8843 RTAssertPanic();
8844}
8845
8846
8847/**
8848 * Raises an assertion on the specified record, showing the given message with
8849 * a record dump attached.
8850 *
8851 * @param pIemCpu The IEM per CPU data.
8852 * @param pEvtRec The record.
8853 * @param pszMsg The message explaining why we're asserting.
8854 */
8855static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
8856{
8857 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8858 iemVerifyAssertAddRecordDump(pEvtRec);
8859 iemVerifyAssertMsg2(pIemCpu);
8860 RTAssertPanic();
8861}
8862
8863
8864/**
8865 * Verifies a write record.
8866 *
8867 * @param pIemCpu The IEM per CPU data.
8868 * @param pEvtRec The write record.
8869 */
8870static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
8871{
8872 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
8873 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
8874 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
8875 if ( RT_FAILURE(rc)
8876 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
8877 {
8878 /* fend off ins */
8879 if ( !pIemCpu->cIOReads
8880 || pEvtRec->u.RamWrite.ab[0] != 0xcc
8881 || ( pEvtRec->u.RamWrite.cb != 1
8882 && pEvtRec->u.RamWrite.cb != 2
8883 && pEvtRec->u.RamWrite.cb != 4) )
8884 {
8885 /* fend off ROMs and MMIO */
8886 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
8887 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
8888 {
8889 /* fend off fxsave */
8890 if (pEvtRec->u.RamWrite.cb != 512)
8891 {
8892 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8893 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
8894 RTAssertMsg2Add("REM: %.*Rhxs\n"
8895 "IEM: %.*Rhxs\n",
8896 pEvtRec->u.RamWrite.cb, abBuf,
8897 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
8898 iemVerifyAssertAddRecordDump(pEvtRec);
8899 iemVerifyAssertMsg2(pIemCpu);
8900 RTAssertPanic();
8901 }
8902 }
8903 }
8904 }
8905
8906}
8907
8908/**
8909 * Performs the post-execution verification checks.
8910 */
8911static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
8912{
8913 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8914 return;
8915
8916 /*
8917 * Switch back the state.
8918 */
8919 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
8920 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
8921 Assert(pOrgCtx != pDebugCtx);
8922 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8923
8924 /*
8925 * Execute the instruction in REM.
8926 */
8927 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8928 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
8929#if 1
8930 if ( HMIsEnabled(pVM)
8931 && pIemCpu->cIOReads == 0
8932 && pIemCpu->cIOWrites == 0)
8933 do
8934 rc = EMR3HmSingleInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu), EM_ONE_INS_FLAGS_RIP_CHANGE);
8935 while (rc == VINF_SUCCESS);
8936#endif
8937 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
8938 || rc == VINF_IOM_R3_IOPORT_READ
8939 || rc == VINF_IOM_R3_IOPORT_WRITE
8940 || rc == VINF_IOM_R3_MMIO_READ
8941 || rc == VINF_IOM_R3_MMIO_READ_WRITE
8942 || rc == VINF_IOM_R3_MMIO_WRITE
8943 )
8944 {
8945 EMRemLock(pVM);
8946 rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
8947 AssertRC(rc);
8948 EMRemUnlock(pVM);
8949 }
8950
8951 /*
8952 * Compare the register states.
8953 */
8954 unsigned cDiffs = 0;
8955 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
8956 {
8957 //Log(("REM and IEM ends up with different registers!\n"));
8958
8959# define CHECK_FIELD(a_Field) \
8960 do \
8961 { \
8962 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8963 { \
8964 switch (sizeof(pOrgCtx->a_Field)) \
8965 { \
8966 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8967 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8968 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8969 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8970 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
8971 } \
8972 cDiffs++; \
8973 } \
8974 } while (0)
8975
8976# define CHECK_BIT_FIELD(a_Field) \
8977 do \
8978 { \
8979 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8980 { \
8981 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8982 cDiffs++; \
8983 } \
8984 } while (0)
8985
8986# define CHECK_SEL(a_Sel) \
8987 do \
8988 { \
8989 CHECK_FIELD(a_Sel.Sel); \
8990 CHECK_FIELD(a_Sel.Attr.u); \
8991 CHECK_FIELD(a_Sel.u64Base); \
8992 CHECK_FIELD(a_Sel.u32Limit); \
8993 CHECK_FIELD(a_Sel.fFlags); \
8994 } while (0)
8995
8996#if 1 /* The recompiler doesn't update these the intel way. */
8997 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8998 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8999 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
9000 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
9001 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
9002 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
9003 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
9004 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
9005 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
9006 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
9007#endif
9008 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
9009 {
9010 RTAssertMsg2Weak(" the FPU state differs\n");
9011 cDiffs++;
9012 CHECK_FIELD(fpu.FCW);
9013 CHECK_FIELD(fpu.FSW);
9014 CHECK_FIELD(fpu.FTW);
9015 CHECK_FIELD(fpu.FOP);
9016 CHECK_FIELD(fpu.FPUIP);
9017 CHECK_FIELD(fpu.CS);
9018 CHECK_FIELD(fpu.Rsrvd1);
9019 CHECK_FIELD(fpu.FPUDP);
9020 CHECK_FIELD(fpu.DS);
9021 CHECK_FIELD(fpu.Rsrvd2);
9022 CHECK_FIELD(fpu.MXCSR);
9023 CHECK_FIELD(fpu.MXCSR_MASK);
9024 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
9025 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
9026 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
9027 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
9028 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
9029 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
9030 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
9031 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
9032 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
9033 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
9034 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
9035 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
9036 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
9037 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
9038 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
9039 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
9040 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
9041 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
9042 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
9043 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
9044 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
9045 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
9046 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
9047 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
9048 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
9049 CHECK_FIELD(fpu.au32RsrvdRest[i]);
9050 }
9051 CHECK_FIELD(rip);
9052 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
9053 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
9054 {
9055 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
9056 CHECK_BIT_FIELD(rflags.Bits.u1CF);
9057 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
9058 CHECK_BIT_FIELD(rflags.Bits.u1PF);
9059 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
9060 CHECK_BIT_FIELD(rflags.Bits.u1AF);
9061 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
9062 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
9063 CHECK_BIT_FIELD(rflags.Bits.u1SF);
9064 CHECK_BIT_FIELD(rflags.Bits.u1TF);
9065 CHECK_BIT_FIELD(rflags.Bits.u1IF);
9066 CHECK_BIT_FIELD(rflags.Bits.u1DF);
9067 CHECK_BIT_FIELD(rflags.Bits.u1OF);
9068 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
9069 CHECK_BIT_FIELD(rflags.Bits.u1NT);
9070 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
9071 CHECK_BIT_FIELD(rflags.Bits.u1RF);
9072 CHECK_BIT_FIELD(rflags.Bits.u1VM);
9073 CHECK_BIT_FIELD(rflags.Bits.u1AC);
9074 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
9075 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
9076 CHECK_BIT_FIELD(rflags.Bits.u1ID);
9077 }
9078
9079 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
9080 CHECK_FIELD(rax);
9081 CHECK_FIELD(rcx);
9082 if (!pIemCpu->fIgnoreRaxRdx)
9083 CHECK_FIELD(rdx);
9084 CHECK_FIELD(rbx);
9085 CHECK_FIELD(rsp);
9086 CHECK_FIELD(rbp);
9087 CHECK_FIELD(rsi);
9088 CHECK_FIELD(rdi);
9089 CHECK_FIELD(r8);
9090 CHECK_FIELD(r9);
9091 CHECK_FIELD(r10);
9092 CHECK_FIELD(r11);
9093 CHECK_FIELD(r12);
9094 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
9095 CHECK_SEL(cs);
9096 CHECK_SEL(ss);
9097 CHECK_SEL(ds);
9098 CHECK_SEL(es);
9099 CHECK_SEL(fs);
9100 CHECK_SEL(gs);
9101 CHECK_FIELD(cr0);
9102 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
9103 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
9104 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
9105 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
9106 if (pOrgCtx->cr2 != pDebugCtx->cr2)
9107 {
9108 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
9109 { /* ignore */ }
9110 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
9111 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
9112 { /* ignore */ }
9113 else
9114 CHECK_FIELD(cr2);
9115 }
9116 CHECK_FIELD(cr3);
9117 CHECK_FIELD(cr4);
9118 CHECK_FIELD(dr[0]);
9119 CHECK_FIELD(dr[1]);
9120 CHECK_FIELD(dr[2]);
9121 CHECK_FIELD(dr[3]);
9122 CHECK_FIELD(dr[6]);
9123 if ((pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
9124 CHECK_FIELD(dr[7]);
9125 CHECK_FIELD(gdtr.cbGdt);
9126 CHECK_FIELD(gdtr.pGdt);
9127 CHECK_FIELD(idtr.cbIdt);
9128 CHECK_FIELD(idtr.pIdt);
9129 CHECK_SEL(ldtr);
9130 CHECK_SEL(tr);
9131 CHECK_FIELD(SysEnter.cs);
9132 CHECK_FIELD(SysEnter.eip);
9133 CHECK_FIELD(SysEnter.esp);
9134 CHECK_FIELD(msrEFER);
9135 CHECK_FIELD(msrSTAR);
9136 CHECK_FIELD(msrPAT);
9137 CHECK_FIELD(msrLSTAR);
9138 CHECK_FIELD(msrCSTAR);
9139 CHECK_FIELD(msrSFMASK);
9140 CHECK_FIELD(msrKERNELGSBASE);
9141
9142 if (cDiffs != 0)
9143 {
9144 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
9145 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
9146 iemVerifyAssertMsg2(pIemCpu);
9147 RTAssertPanic();
9148 }
9149# undef CHECK_FIELD
9150# undef CHECK_BIT_FIELD
9151 }
9152
9153 /*
9154 * If the register state compared fine, check the verification event
9155 * records.
9156 */
9157 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
9158 {
9159 /*
9160 * Compare verification event records.
9161 * - I/O port accesses should be a 1:1 match.
9162 */
9163 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
9164 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
9165 while (pIemRec && pOtherRec)
9166 {
9167 /* Since we might miss RAM writes and reads, skip over extra IEM RAM records,
9168 but verify that any extra write records match the actual memory contents. */
9169 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
9170 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
9171 && pIemRec->pNext)
9172 {
9173 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
9174 iemVerifyWriteRecord(pIemCpu, pIemRec);
9175 pIemRec = pIemRec->pNext;
9176 }
9177
9178 /* Do the compare. */
9179 if (pIemRec->enmEvent != pOtherRec->enmEvent)
9180 {
9181 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
9182 break;
9183 }
9184 bool fEquals;
9185 switch (pIemRec->enmEvent)
9186 {
9187 case IEMVERIFYEVENT_IOPORT_READ:
9188 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
9189 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
9190 break;
9191 case IEMVERIFYEVENT_IOPORT_WRITE:
9192 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
9193 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
9194 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
9195 break;
9196 case IEMVERIFYEVENT_RAM_READ:
9197 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
9198 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
9199 break;
9200 case IEMVERIFYEVENT_RAM_WRITE:
9201 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
9202 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
9203 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
9204 break;
9205 default:
9206 fEquals = false;
9207 break;
9208 }
9209 if (!fEquals)
9210 {
9211 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
9212 break;
9213 }
9214
9215 /* advance */
9216 pIemRec = pIemRec->pNext;
9217 pOtherRec = pOtherRec->pNext;
9218 }
9219
9220 /* Ignore extra writes and reads. */
9221 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
9222 {
9223 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
9224 iemVerifyWriteRecord(pIemCpu, pIemRec);
9225 pIemRec = pIemRec->pNext;
9226 }
9227 if (pIemRec != NULL)
9228 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
9229 else if (pOtherRec != NULL)
9230 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
9231 }
9232 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
9233}
9234
9235#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
9236
9237/* stubs */
9238static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9239{
9240 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
9241 return VERR_INTERNAL_ERROR;
9242}
9243
9244static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9245{
9246 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
9247 return VERR_INTERNAL_ERROR;
9248}
9249
9250#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
9251
9252
9253/**
9254 * Makes status code adjustments (pass up from I/O and access handler)
9255 * as well as maintaining statistics.
9256 *
9257 * @returns Strict VBox status code to pass up.
9258 * @param pIemCpu The IEM per CPU data.
9259 * @param rcStrict The status from executing an instruction.
9260 */
9261DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
9262{
9263 if (rcStrict != VINF_SUCCESS)
9264 {
9265 if (RT_SUCCESS(rcStrict))
9266 {
9267 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
9268 || rcStrict == VINF_IOM_R3_IOPORT_READ
9269 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
9270 || rcStrict == VINF_IOM_R3_MMIO_READ
9271 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
9272 || rcStrict == VINF_IOM_R3_MMIO_WRITE
9273 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9274 int32_t const rcPassUp = pIemCpu->rcPassUp;
9275 if (rcPassUp == VINF_SUCCESS)
9276 pIemCpu->cRetInfStatuses++;
9277 else if ( rcPassUp < VINF_EM_FIRST
9278 || rcPassUp > VINF_EM_LAST
9279 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
9280 {
9281 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
9282 pIemCpu->cRetPassUpStatus++;
9283 rcStrict = rcPassUp;
9284 }
9285 else
9286 {
9287 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
9288 pIemCpu->cRetInfStatuses++;
9289 }
9290 }
9291 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
9292 pIemCpu->cRetAspectNotImplemented++;
9293 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
9294 pIemCpu->cRetInstrNotImplemented++;
9295#ifdef IEM_VERIFICATION_MODE_FULL
9296 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
9297 rcStrict = VINF_SUCCESS;
9298#endif
9299 else
9300 pIemCpu->cRetErrStatuses++;
9301 }
9302 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
9303 {
9304 pIemCpu->cRetPassUpStatus++;
9305 rcStrict = pIemCpu->rcPassUp;
9306 }
9307
9308 return rcStrict;
9309}
9310
9311
9312/**
9313 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9314 * IEMExecOneWithPrefetchedByPC.
9315 *
9316 * @return Strict VBox status code.
9317 * @param pVCpu The current virtual CPU.
9318 * @param pIemCpu The IEM per CPU data.
9319 * @param fExecuteInhibit If set, execute the instruction following CLI,
9320 * POP SS and MOV SS,GR.
9321 */
9322DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
9323{
9324 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9325 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9326 if (rcStrict == VINF_SUCCESS)
9327 pIemCpu->cInstructions++;
9328 if (pIemCpu->cActiveMappings > 0)
9329 iemMemRollback(pIemCpu);
9330//#ifdef DEBUG
9331// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
9332//#endif
9333
9334 /* Execute the next instruction as well if a cli, pop ss or
9335 mov ss, Gr has just completed successfully. */
9336 if ( fExecuteInhibit
9337 && rcStrict == VINF_SUCCESS
9338 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9339 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
9340 {
9341 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
9342 if (rcStrict == VINF_SUCCESS)
9343 {
9344 b; IEM_OPCODE_GET_NEXT_U8(&b);
9345 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9346 if (rcStrict == VINF_SUCCESS)
9347 pIemCpu->cInstructions++;
9348 if (pIemCpu->cActiveMappings > 0)
9349 iemMemRollback(pIemCpu);
9350 }
9351 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
9352 }
9353
9354 /*
9355 * Return value fiddling, statistics and sanity assertions.
9356 */
9357 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9358
9359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
9360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
9361#if defined(IEM_VERIFICATION_MODE_FULL)
9362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
9363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
9364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
9365 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
9366#endif
9367 return rcStrict;
9368}
9369
9370
9371#ifdef IN_RC
9372/**
9373 * Re-enters raw-mode or ensure we return to ring-3.
9374 *
9375 * @returns rcStrict, maybe modified.
9376 * @param pIemCpu The IEM CPU structure.
9377 * @param pVCpu The cross context virtual CPU structure of the caller.
9378 * @param pCtx The current CPU context.
9379 * @param rcStrict The status code returned by the interpreter.
9380 */
9381DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
9382{
9383 if (!pIemCpu->fInPatchCode)
9384 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
9385 return rcStrict;
9386}
9387#endif
9388
9389
9390/**
9391 * Execute one instruction.
9392 *
9393 * @return Strict VBox status code.
9394 * @param pVCpu The current virtual CPU.
9395 */
9396VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
9397{
9398 PIEMCPU pIemCpu = &pVCpu->iem.s;
9399
9400#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9401 iemExecVerificationModeSetup(pIemCpu);
9402#endif
9403#ifdef LOG_ENABLED
9404 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9405# ifdef IN_RING3
9406 if (LogIs2Enabled())
9407 {
9408 char szInstr[256];
9409 uint32_t cbInstr = 0;
9410 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9411 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9412 szInstr, sizeof(szInstr), &cbInstr);
9413
9414 Log2(("**** "
9415 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9416 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9417 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9418 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9419 " %s\n"
9420 ,
9421 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9422 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9423 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9424 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9425 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9426 szInstr));
9427
9428 if (LogIs3Enabled())
9429 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9430 }
9431 else
9432# endif
9433 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9434 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9435#endif
9436
9437 /*
9438 * Do the decoding and emulation.
9439 */
9440 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9441 if (rcStrict == VINF_SUCCESS)
9442 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9443
9444#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9445 /*
9446 * Assert some sanity.
9447 */
9448 iemExecVerificationModeCheck(pIemCpu);
9449#endif
9450#ifdef IN_RC
9451 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9452#endif
9453 if (rcStrict != VINF_SUCCESS)
9454 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9455 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9456 return rcStrict;
9457}
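/*
 * A minimal, hypothetical caller sketch for IEMExecOne: the function name and
 * the instruction budget below are illustrative only and do not exist in the
 * VMM sources; only the IEMExecOne call itself is part of the interface above.
 */
#if 0 /* illustrative only */
static int emR3SketchInterpretABit(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decodes and emulates exactly one guest instruction */
        if (rcStrict != VINF_SUCCESS)   /* bail out on any informational or error status */
            break;
    }
    return VBOXSTRICTRC_VAL(rcStrict);  /* flatten the strict status for a plain int caller */
}
#endif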
9458
9459
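/**
 * Executes one instruction, returning the number of bytes written.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must be the one IEM is using.
 * @param   pcbWritten  Where to return the number of bytes IEM wrote while
 *                      executing the instruction.  Optional.
 */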
9460VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9461{
9462 PIEMCPU pIemCpu = &pVCpu->iem.s;
9463 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9464 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9465
9466 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9467 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9468 if (rcStrict == VINF_SUCCESS)
9469 {
9470 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9471 if (pcbWritten)
9472 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9473 }
9474
9475#ifdef IN_RC
9476 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9477#endif
9478 return rcStrict;
9479}
9480
9481
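/**
 * Executes one instruction, using opcode bytes supplied by the caller when
 * they cover the current RIP.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must be the one IEM is using.
 * @param   OpcodeBytesPC   The guest RIP at which the supplied opcode bytes start.
 * @param   pvOpcodeBytes   The opcode bytes.
 * @param   cbOpcodeBytes   Number of opcode bytes available.  If zero, or if
 *                          @a OpcodeBytesPC doesn't match the current RIP, the
 *                          opcodes are prefetched the normal way instead.
 */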
9482VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9483 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9484{
9485 PIEMCPU pIemCpu = &pVCpu->iem.s;
9486 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9487 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9488
9489 VBOXSTRICTRC rcStrict;
9490 if ( cbOpcodeBytes
9491 && pCtx->rip == OpcodeBytesPC)
9492 {
9493 iemInitDecoder(pIemCpu, false);
9494 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9495 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9496 rcStrict = VINF_SUCCESS;
9497 }
9498 else
9499 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9500 if (rcStrict == VINF_SUCCESS)
9501 {
9502 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9503 }
9504
9505#ifdef IN_RC
9506 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9507#endif
9508 return rcStrict;
9509}
9510
9511
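/**
 * Executes one instruction with access handlers bypassed, returning the number
 * of bytes written.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must be the one IEM is using.
 * @param   pcbWritten  Where to return the number of bytes IEM wrote while
 *                      executing the instruction.  Optional.
 */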
9512VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9513{
9514 PIEMCPU pIemCpu = &pVCpu->iem.s;
9515 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9516 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9517
9518 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9519 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9520 if (rcStrict == VINF_SUCCESS)
9521 {
9522 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9523 if (pcbWritten)
9524 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9525 }
9526
9527#ifdef IN_RC
9528 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9529#endif
9530 return rcStrict;
9531}
9532
9533
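/**
 * Executes one instruction with access handlers bypassed, using opcode bytes
 * supplied by the caller when they cover the current RIP.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must be the one IEM is using.
 * @param   OpcodeBytesPC   The guest RIP at which the supplied opcode bytes start.
 * @param   pvOpcodeBytes   The opcode bytes.
 * @param   cbOpcodeBytes   Number of opcode bytes available.  If zero, or if
 *                          @a OpcodeBytesPC doesn't match the current RIP, the
 *                          opcodes are prefetched the normal way instead.
 */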
9534VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9535 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9536{
9537 PIEMCPU pIemCpu = &pVCpu->iem.s;
9538 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9539 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9540
9541 VBOXSTRICTRC rcStrict;
9542 if ( cbOpcodeBytes
9543 && pCtx->rip == OpcodeBytesPC)
9544 {
9545 iemInitDecoder(pIemCpu, true);
9546 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9547 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9548 rcStrict = VINF_SUCCESS;
9549 }
9550 else
9551 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9552 if (rcStrict == VINF_SUCCESS)
9553 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9554
9555#ifdef IN_RC
9556 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9557#endif
9558 return rcStrict;
9559}
9560
9561
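/**
 * Injects a pending TRPM event, if deliverable, and executes an instruction.
 *
 * Despite the name, this currently decodes and executes just one instruction
 * after the optional event injection, much like IEMExecOne.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu   The current virtual CPU.
 */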
9562VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
9563{
9564 PIEMCPU pIemCpu = &pVCpu->iem.s;
9565 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9566
9567 /*
9568 * See if there is an interrupt pending in TRPM and inject it if we can.
9569 */
9570#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
9571# ifdef IEM_VERIFICATION_MODE_FULL
9572 pIemCpu->uInjectCpl = UINT8_MAX;
9573# endif
9574 if ( pCtx->eflags.Bits.u1IF
9575 && TRPMHasTrap(pVCpu)
9576 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
9577 {
9578 uint8_t u8TrapNo;
9579 TRPMEVENT enmType;
9580 RTGCUINT uErrCode;
9581 RTGCPTR uCr2;
9582 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9583 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
9584 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9585 TRPMResetTrap(pVCpu);
9586 }
9587#else
9588 iemExecVerificationModeSetup(pIemCpu);
9589#endif
9590
9591 /*
9592 * Log the state.
9593 */
9594#ifdef LOG_ENABLED
9595# ifdef IN_RING3
9596 if (LogIs2Enabled())
9597 {
9598 char szInstr[256];
9599 uint32_t cbInstr = 0;
9600 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9601 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9602 szInstr, sizeof(szInstr), &cbInstr);
9603
9604 Log2(("**** "
9605 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9606 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9607 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9608 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9609 " %s\n"
9610 ,
9611 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9612 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9613 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9614 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9615 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9616 szInstr));
9617
9618 if (LogIs3Enabled())
9619 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9620 }
9621 else
9622# endif
9623 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9624 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9625#endif
9626
9627 /*
9628 * Do the decoding and emulation.
9629 */
9630 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9631 if (rcStrict == VINF_SUCCESS)
9632 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9633
9634#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9635 /*
9636 * Assert some sanity.
9637 */
9638 iemExecVerificationModeCheck(pIemCpu);
9639#endif
9640
9641 /*
9642 * Maybe re-enter raw-mode and log.
9643 */
9644#ifdef IN_RC
9645 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9646#endif
9647 if (rcStrict != VINF_SUCCESS)
9648 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9649 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9650 return rcStrict;
9651}
9652
9653
9654
9655/**
9656 * Injects a trap, fault, abort, software interrupt or external interrupt.
9657 *
9658 * The parameter list matches TRPMQueryTrapAll pretty closely.
9659 *
9660 * @returns Strict VBox status code.
9661 * @param pVCpu The current virtual CPU.
9662 * @param u8TrapNo The trap number.
9663 * @param enmType What type is it (trap/fault/abort), software
9664 * interrupt or hardware interrupt.
9665 * @param uErrCode The error code if applicable.
9666 * @param uCr2 The CR2 value if applicable.
9667 */
9668VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
9669{
9670 iemInitDecoder(&pVCpu->iem.s, false);
9671#ifdef DBGFTRACE_ENABLED
9672 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9673 u8TrapNo, enmType, uErrCode, uCr2);
9674#endif
9675
9676 uint32_t fFlags;
9677 switch (enmType)
9678 {
9679 case TRPM_HARDWARE_INT:
9680 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9681 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9682 uErrCode = uCr2 = 0;
9683 break;
9684
9685 case TRPM_SOFTWARE_INT:
9686 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9687 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9688 uErrCode = uCr2 = 0;
9689 break;
9690
9691 case TRPM_TRAP:
9692 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9693 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9694 if (u8TrapNo == X86_XCPT_PF)
9695 fFlags |= IEM_XCPT_FLAGS_CR2;
9696 switch (u8TrapNo)
9697 {
9698 case X86_XCPT_DF:
9699 case X86_XCPT_TS:
9700 case X86_XCPT_NP:
9701 case X86_XCPT_SS:
9702 case X86_XCPT_PF:
9703 case X86_XCPT_AC:
9704 fFlags |= IEM_XCPT_FLAGS_ERR;
9705 break;
9706 }
9707 break;
9708
9709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9710 }
9711
9712 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
9713}
9714
9715
9716VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9717{
9718 return VERR_NOT_IMPLEMENTED;
9719}
9720
9721
9722VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9723{
9724 return VERR_NOT_IMPLEMENTED;
9725}
9726
9727
9728#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
9729/**
9730 * Executes an IRET instruction with the default operand size.
9731 *
9732 * This is for PATM.
9733 *
9734 * @returns VBox status code.
9735 * @param pVCpu The current virtual CPU.
9736 * @param pCtxCore The register frame.
9737 */
9738VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
9739{
9740 PIEMCPU pIemCpu = &pVCpu->iem.s;
9741 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9742
9743 iemCtxCoreToCtx(pCtx, pCtxCore);
9744 iemInitDecoder(pIemCpu, false /*fBypassHandlers*/);
9745 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
9746 if (rcStrict == VINF_SUCCESS)
9747 iemCtxToCtxCore(pCtxCore, pCtx);
9748 else
9749 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9750 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9751 return rcStrict;
9752}
9753#endif
9754
9755
9756
9757/**
9758 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9759 *
9760 * This API ASSUMES that the caller has already verified that the guest code is
9761 * allowed to access the I/O port. (The I/O port is in the DX register in the
9762 * guest state.)
9763 *
9764 * @returns Strict VBox status code.
9765 * @param pVCpu The cross context per virtual CPU structure.
9766 * @param cbValue The size of the I/O port access (1, 2, or 4).
9767 * @param enmAddrMode The addressing mode.
9768 * @param fRepPrefix Indicates whether a repeat prefix is used
9769 * (doesn't matter which for this instruction).
9770 * @param cbInstr The instruction length in bytes.
9771 * @param iEffSeg The effective segment register (X86_SREG_XXX).
9772 */
9773VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9774 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
9775{
9776 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9777 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
9778
9779 /*
9780 * State init.
9781 */
9782 PIEMCPU pIemCpu = &pVCpu->iem.s;
9783 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
9784
9785 /*
9786 * Switch orgy for getting to the right handler.
9787 */
9788 VBOXSTRICTRC rcStrict;
9789 if (fRepPrefix)
9790 {
9791 switch (enmAddrMode)
9792 {
9793 case IEMMODE_16BIT:
9794 switch (cbValue)
9795 {
9796 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9797 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9798 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9799 default:
9800 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9801 }
9802 break;
9803
9804 case IEMMODE_32BIT:
9805 switch (cbValue)
9806 {
9807 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9808 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9809 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9810 default:
9811 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9812 }
9813 break;
9814
9815 case IEMMODE_64BIT:
9816 switch (cbValue)
9817 {
9818 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9819 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9820 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9821 default:
9822 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9823 }
9824 break;
9825
9826 default:
9827 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9828 }
9829 }
9830 else
9831 {
9832 switch (enmAddrMode)
9833 {
9834 case IEMMODE_16BIT:
9835 switch (cbValue)
9836 {
9837 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9838 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9839 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9840 default:
9841 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9842 }
9843 break;
9844
9845 case IEMMODE_32BIT:
9846 switch (cbValue)
9847 {
9848 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9849 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9850 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9851 default:
9852 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9853 }
9854 break;
9855
9856 case IEMMODE_64BIT:
9857 switch (cbValue)
9858 {
9859 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9860 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9861 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9862 default:
9863 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9864 }
9865 break;
9866
9867 default:
9868 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9869 }
9870 }
9871
9872 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9873}
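/*
 * A hypothetical HM-side usage sketch for IEMExecStringIoWrite: the handler
 * name and the assumption that the exit is already decoded are illustrative
 * only; the parameter values show one valid call shape (REP OUTSB with 32-bit
 * addressing from the default DS segment).  IEMExecStringIoRead is used the
 * same way for INS, just without the iEffSeg parameter.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC hmR0SketchHandleOutsExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* The caller is assumed to have verified I/O port access permissions
       already, as required by the API contract above. */
    return IEMExecStringIoWrite(pVCpu,
                                1             /* cbValue: byte-sized port access */,
                                IEMMODE_32BIT /* enmAddrMode */,
                                true          /* fRepPrefix */,
                                cbInstr,
                                X86_SREG_DS   /* iEffSeg: no segment override */);
}
#endif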
9874
9875
9876/**
9877 * Interface for HM and EM for executing string I/O IN (read) instructions.
9878 *
9879 * This API ASSUMES that the caller has already verified that the guest code is
9880 * allowed to access the I/O port. (The I/O port is in the DX register in the
9881 * guest state.)
9882 *
9883 * @returns Strict VBox status code.
9884 * @param pVCpu The cross context per virtual CPU structure.
9885 * @param cbValue The size of the I/O port access (1, 2, or 4).
9886 * @param enmAddrMode The addressing mode.
9887 * @param fRepPrefix Indicates whether a repeat prefix is used
9888 * (doesn't matter which for this instruction).
9889 * @param cbInstr The instruction length in bytes.
9890 */
9891VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9892 bool fRepPrefix, uint8_t cbInstr)
9893{
9894 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
9895
9896 /*
9897 * State init.
9898 */
9899 PIEMCPU pIemCpu = &pVCpu->iem.s;
9900 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
9901
9902 /*
9903 * Switch orgy for getting to the right handler.
9904 */
9905 VBOXSTRICTRC rcStrict;
9906 if (fRepPrefix)
9907 {
9908 switch (enmAddrMode)
9909 {
9910 case IEMMODE_16BIT:
9911 switch (cbValue)
9912 {
9913 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9914 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9915 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9916 default:
9917 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9918 }
9919 break;
9920
9921 case IEMMODE_32BIT:
9922 switch (cbValue)
9923 {
9924 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9925 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9926 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9927 default:
9928 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9929 }
9930 break;
9931
9932 case IEMMODE_64BIT:
9933 switch (cbValue)
9934 {
9935 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9936 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9937 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9938 default:
9939 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9940 }
9941 break;
9942
9943 default:
9944 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9945 }
9946 }
9947 else
9948 {
9949 switch (enmAddrMode)
9950 {
9951 case IEMMODE_16BIT:
9952 switch (cbValue)
9953 {
9954 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9955 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9956 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9957 default:
9958 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9959 }
9960 break;
9961
9962 case IEMMODE_32BIT:
9963 switch (cbValue)
9964 {
9965 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9966 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9967 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9968 default:
9969 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9970 }
9971 break;
9972
9973 case IEMMODE_64BIT:
9974 switch (cbValue)
9975 {
9976 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9977 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9978 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9979 default:
9980 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9981 }
9982 break;
9983
9984 default:
9985 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9986 }
9987 }
9988
9989 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9990}
9991