VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@47535

Last change on this file since 47535 was 47494, checked in by vboxsync, 12 years ago

IEM: Implemented expand down limit checks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 362.2 KB
1/* $Id: IEMAll.cpp 47494 2013-07-31 14:49:52Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too much
37 * while leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
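To make the level convention above concrete, here is a minimal, hypothetical sketch (not part of this file; the helper name and messages are invented) of how code in the IEM log group might use the levels: LogFlow for enter/exit info, Log4 for decoded mnemonics, Log for major events.

/* Illustration only: assumes LOG_GROUP has been set to LOG_GROUP_IEM (as done below). */
static void iemExampleLogUsage(PIEMCPU pIemCpu, uint8_t bOpcode, RTGCPTR GCPtrInstr)
{
    LogFlow(("iemExampleLogUsage: enter\n"));                      /* flow: basic enter/exit info */
    Log4(("decode: opcode %#04x at %RGv\n", bOpcode, GCPtrInstr)); /* level 4: mnemonic w/ EIP */
    if (bOpcode == 0xf4 /* hlt */)
        Log(("iemExampleLogUsage: HLT encountered\n"));            /* level 1: major event */
    NOREF(pIemCpu);
}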
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pdm.h>
86#include <VBox/vmm/pgm.h>
87#include <internal/pgm.h>
88#include <VBox/vmm/iom.h>
89#include <VBox/vmm/em.h>
90#include <VBox/vmm/hm.h>
91#include <VBox/vmm/tm.h>
92#include <VBox/vmm/dbgf.h>
93#ifdef VBOX_WITH_RAW_MODE_NOT_R0
94# include <VBox/vmm/patm.h>
95#endif
96#include "IEMInternal.h"
97#ifdef IEM_VERIFICATION_MODE_FULL
98# include <VBox/vmm/rem.h>
99# include <VBox/vmm/mm.h>
100#endif
101#include <VBox/vmm/vm.h>
102#include <VBox/log.h>
103#include <VBox/err.h>
104#include <VBox/param.h>
105#include <VBox/dis.h>
106#include <VBox/disopcode.h>
107#include <iprt/assert.h>
108#include <iprt/string.h>
109#include <iprt/x86.h>
110
111
112
113/*******************************************************************************
114* Structures and Typedefs *
115*******************************************************************************/
116/** @typedef PFNIEMOP
117 * Pointer to an opcode decoder function.
118 */
119
120/** @def FNIEMOP_DEF
121 * Define an opcode decoder function.
122 *
123 * We're using macros for this so that adding and removing parameters as well as
124 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
125 *
126 * @param a_Name The function name.
127 */
128
129
130#if defined(__GNUC__) && defined(RT_ARCH_X86)
131typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
132# define FNIEMOP_DEF(a_Name) \
133 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
134# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
135 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
136# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
137 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
138
139#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
147
148#elif defined(__GNUC__)
149typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
156
157#else
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
165
166#endif
167
168
169/**
170 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
171 */
172typedef union IEMSELDESC
173{
174 /** The legacy view. */
175 X86DESC Legacy;
176 /** The long mode view. */
177 X86DESC64 Long;
178} IEMSELDESC;
179/** Pointer to a selector descriptor table entry. */
180typedef IEMSELDESC *PIEMSELDESC;
181
182
183/*******************************************************************************
184* Defined Constants And Macros *
185*******************************************************************************/
186/** @name IEM status codes.
187 *
188 * Not quite sure how this will play out in the end, just aliasing safe status
189 * codes for now.
190 *
191 * @{ */
192#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
193/** @} */
194
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
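As a hedged illustration (not taken from the file), the macro above is meant to terminate switches whose cases are known to be exhaustive, e.g. an operand-size dispatch, so GCC can see that every path either assigns or returns:

/* Illustrative sketch only; the surrounding function and variables are assumed. */
uint8_t cbValue;
switch (pIemCpu->enmEffOpSize)
{
    case IEMMODE_16BIT: cbValue = 2; break;
    case IEMMODE_32BIT: cbValue = 4; break;
    case IEMMODE_64BIT: cbValue = 8; break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE) */
}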
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
235
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
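A minimal sketch of how the DEF and CALL macros pair up (the decoder and worker below are hypothetical; the real decoders live in the instruction files compiled with this one):

/* Hypothetical one-argument worker and its caller, for illustration only. */
FNIEMOP_DEF_1(iemOp_ExampleWorker, uint8_t, bValue)
{
    NOREF(bValue);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_Example)
{
    /* Dispatch to the worker using the matching one-argument CALL macro. */
    return FNIEMOP_CALL_1(iemOp_ExampleWorker, 0x90 /* arbitrary example value */);
}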
259
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in long mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in real mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
286 */
287#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
288
289/**
290 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
291 */
292#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
293
294/**
295 * Tests if at least one of the specified AMD CPUID features (extended) is
296 * marked present.
297 */
298#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
299
300/**
301 * Checks if an Intel CPUID feature is present.
302 */
303#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
304 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
305 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
306
307/**
308 * Checks if an Intel CPUID feature is present.
309 */
310#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
311 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
312
313/**
314 * Checks if an Intel CPUID feature is present in the host CPU.
315 */
316#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
317 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
318
319/**
320 * Evaluates to true if we're presenting an Intel CPU to the guest.
321 */
322#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
323
324/**
325 * Evaluates to true if we're presenting an AMD CPU to the guest.
326 */
327#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
328
329/**
330 * Check if the address is canonical.
331 */
332#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
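A quick worked example of the bias trick above (illustration only): adding 2^47 maps the two canonical halves onto one contiguous range, so a single unsigned compare against 2^48 suffices.

/* Worked example (not part of the file):
 *   0x00007FFFFFFFFFFF + 0x800000000000 = 0x0000FFFFFFFFFFFF  -> below 2^48, canonical
 *   0x0000800000000000 + 0x800000000000 = 0x0001000000000000  -> not below 2^48, non-canonical
 *   0xFFFF800000000000 + 0x800000000000 = 0 (wraps)           -> below 2^48, canonical
 */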
333
334
335/*******************************************************************************
336* Global Variables *
337*******************************************************************************/
338extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
339
340
341/** Function table for the ADD instruction. */
342static const IEMOPBINSIZES g_iemAImpl_add =
343{
344 iemAImpl_add_u8, iemAImpl_add_u8_locked,
345 iemAImpl_add_u16, iemAImpl_add_u16_locked,
346 iemAImpl_add_u32, iemAImpl_add_u32_locked,
347 iemAImpl_add_u64, iemAImpl_add_u64_locked
348};
349
350/** Function table for the ADC instruction. */
351static const IEMOPBINSIZES g_iemAImpl_adc =
352{
353 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
354 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
355 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
356 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
357};
358
359/** Function table for the SUB instruction. */
360static const IEMOPBINSIZES g_iemAImpl_sub =
361{
362 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
363 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
364 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
365 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
366};
367
368/** Function table for the SBB instruction. */
369static const IEMOPBINSIZES g_iemAImpl_sbb =
370{
371 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
372 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
373 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
374 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
375};
376
377/** Function table for the OR instruction. */
378static const IEMOPBINSIZES g_iemAImpl_or =
379{
380 iemAImpl_or_u8, iemAImpl_or_u8_locked,
381 iemAImpl_or_u16, iemAImpl_or_u16_locked,
382 iemAImpl_or_u32, iemAImpl_or_u32_locked,
383 iemAImpl_or_u64, iemAImpl_or_u64_locked
384};
385
386/** Function table for the XOR instruction. */
387static const IEMOPBINSIZES g_iemAImpl_xor =
388{
389 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
390 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
391 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
392 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
393};
394
395/** Function table for the AND instruction. */
396static const IEMOPBINSIZES g_iemAImpl_and =
397{
398 iemAImpl_and_u8, iemAImpl_and_u8_locked,
399 iemAImpl_and_u16, iemAImpl_and_u16_locked,
400 iemAImpl_and_u32, iemAImpl_and_u32_locked,
401 iemAImpl_and_u64, iemAImpl_and_u64_locked
402};
403
404/** Function table for the CMP instruction.
405 * @remarks Making operand order ASSUMPTIONS.
406 */
407static const IEMOPBINSIZES g_iemAImpl_cmp =
408{
409 iemAImpl_cmp_u8, NULL,
410 iemAImpl_cmp_u16, NULL,
411 iemAImpl_cmp_u32, NULL,
412 iemAImpl_cmp_u64, NULL
413};
414
415/** Function table for the TEST instruction.
416 * @remarks Making operand order ASSUMPTIONS.
417 */
418static const IEMOPBINSIZES g_iemAImpl_test =
419{
420 iemAImpl_test_u8, NULL,
421 iemAImpl_test_u16, NULL,
422 iemAImpl_test_u32, NULL,
423 iemAImpl_test_u64, NULL
424};
425
426/** Function table for the BT instruction. */
427static const IEMOPBINSIZES g_iemAImpl_bt =
428{
429 NULL, NULL,
430 iemAImpl_bt_u16, NULL,
431 iemAImpl_bt_u32, NULL,
432 iemAImpl_bt_u64, NULL
433};
434
435/** Function table for the BTC instruction. */
436static const IEMOPBINSIZES g_iemAImpl_btc =
437{
438 NULL, NULL,
439 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
440 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
441 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
442};
443
444/** Function table for the BTR instruction. */
445static const IEMOPBINSIZES g_iemAImpl_btr =
446{
447 NULL, NULL,
448 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
449 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
450 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
451};
452
453/** Function table for the BTS instruction. */
454static const IEMOPBINSIZES g_iemAImpl_bts =
455{
456 NULL, NULL,
457 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
458 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
459 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
460};
461
462/** Function table for the BSF instruction. */
463static const IEMOPBINSIZES g_iemAImpl_bsf =
464{
465 NULL, NULL,
466 iemAImpl_bsf_u16, NULL,
467 iemAImpl_bsf_u32, NULL,
468 iemAImpl_bsf_u64, NULL
469};
470
471/** Function table for the BSR instruction. */
472static const IEMOPBINSIZES g_iemAImpl_bsr =
473{
474 NULL, NULL,
475 iemAImpl_bsr_u16, NULL,
476 iemAImpl_bsr_u32, NULL,
477 iemAImpl_bsr_u64, NULL
478};
479
480/** Function table for the IMUL instruction. */
481static const IEMOPBINSIZES g_iemAImpl_imul_two =
482{
483 NULL, NULL,
484 iemAImpl_imul_two_u16, NULL,
485 iemAImpl_imul_two_u32, NULL,
486 iemAImpl_imul_two_u64, NULL
487};
488
489/** Group 1 /r lookup table. */
490static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
491{
492 &g_iemAImpl_add,
493 &g_iemAImpl_or,
494 &g_iemAImpl_adc,
495 &g_iemAImpl_sbb,
496 &g_iemAImpl_and,
497 &g_iemAImpl_sub,
498 &g_iemAImpl_xor,
499 &g_iemAImpl_cmp
500};
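As a hedged sketch (not from the file), a group-1 decoder would pick its worker from the table above using the ModR/M reg field, bits 5:3; the variable names here are assumed:

/* Illustration only: 0x05 encodes reg=000b (ADD), mod=00b, rm=101b. */
uint8_t const         bRm   = 0x05;
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];   /* -> &g_iemAImpl_add */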
501
502/** Function table for the INC instruction. */
503static const IEMOPUNARYSIZES g_iemAImpl_inc =
504{
505 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
506 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
507 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
508 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
509};
510
511/** Function table for the DEC instruction. */
512static const IEMOPUNARYSIZES g_iemAImpl_dec =
513{
514 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
515 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
516 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
517 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
518};
519
520/** Function table for the NEG instruction. */
521static const IEMOPUNARYSIZES g_iemAImpl_neg =
522{
523 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
524 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
525 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
526 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
527};
528
529/** Function table for the NOT instruction. */
530static const IEMOPUNARYSIZES g_iemAImpl_not =
531{
532 iemAImpl_not_u8, iemAImpl_not_u8_locked,
533 iemAImpl_not_u16, iemAImpl_not_u16_locked,
534 iemAImpl_not_u32, iemAImpl_not_u32_locked,
535 iemAImpl_not_u64, iemAImpl_not_u64_locked
536};
537
538
539/** Function table for the ROL instruction. */
540static const IEMOPSHIFTSIZES g_iemAImpl_rol =
541{
542 iemAImpl_rol_u8,
543 iemAImpl_rol_u16,
544 iemAImpl_rol_u32,
545 iemAImpl_rol_u64
546};
547
548/** Function table for the ROR instruction. */
549static const IEMOPSHIFTSIZES g_iemAImpl_ror =
550{
551 iemAImpl_ror_u8,
552 iemAImpl_ror_u16,
553 iemAImpl_ror_u32,
554 iemAImpl_ror_u64
555};
556
557/** Function table for the RCL instruction. */
558static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
559{
560 iemAImpl_rcl_u8,
561 iemAImpl_rcl_u16,
562 iemAImpl_rcl_u32,
563 iemAImpl_rcl_u64
564};
565
566/** Function table for the RCR instruction. */
567static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
568{
569 iemAImpl_rcr_u8,
570 iemAImpl_rcr_u16,
571 iemAImpl_rcr_u32,
572 iemAImpl_rcr_u64
573};
574
575/** Function table for the SHL instruction. */
576static const IEMOPSHIFTSIZES g_iemAImpl_shl =
577{
578 iemAImpl_shl_u8,
579 iemAImpl_shl_u16,
580 iemAImpl_shl_u32,
581 iemAImpl_shl_u64
582};
583
584/** Function table for the SHR instruction. */
585static const IEMOPSHIFTSIZES g_iemAImpl_shr =
586{
587 iemAImpl_shr_u8,
588 iemAImpl_shr_u16,
589 iemAImpl_shr_u32,
590 iemAImpl_shr_u64
591};
592
593/** Function table for the SAR instruction. */
594static const IEMOPSHIFTSIZES g_iemAImpl_sar =
595{
596 iemAImpl_sar_u8,
597 iemAImpl_sar_u16,
598 iemAImpl_sar_u32,
599 iemAImpl_sar_u64
600};
601
602
603/** Function table for the MUL instruction. */
604static const IEMOPMULDIVSIZES g_iemAImpl_mul =
605{
606 iemAImpl_mul_u8,
607 iemAImpl_mul_u16,
608 iemAImpl_mul_u32,
609 iemAImpl_mul_u64
610};
611
612/** Function table for the IMUL instruction working implicitly on rAX. */
613static const IEMOPMULDIVSIZES g_iemAImpl_imul =
614{
615 iemAImpl_imul_u8,
616 iemAImpl_imul_u16,
617 iemAImpl_imul_u32,
618 iemAImpl_imul_u64
619};
620
621/** Function table for the DIV instruction. */
622static const IEMOPMULDIVSIZES g_iemAImpl_div =
623{
624 iemAImpl_div_u8,
625 iemAImpl_div_u16,
626 iemAImpl_div_u32,
627 iemAImpl_div_u64
628};
629
630/** Function table for the IDIV instruction. */
631static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
632{
633 iemAImpl_idiv_u8,
634 iemAImpl_idiv_u16,
635 iemAImpl_idiv_u32,
636 iemAImpl_idiv_u64
637};
638
639/** Function table for the SHLD instruction */
640static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
641{
642 iemAImpl_shld_u16,
643 iemAImpl_shld_u32,
644 iemAImpl_shld_u64,
645};
646
647/** Function table for the SHRD instruction */
648static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
649{
650 iemAImpl_shrd_u16,
651 iemAImpl_shrd_u32,
652 iemAImpl_shrd_u64,
653};
654
655
656/** Function table for the PUNPCKLBW instruction */
657static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
658/** Function table for the PUNPCKLWD instruction */
659static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
660/** Function table for the PUNPCKLDQ instruction */
661static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
662/** Function table for the PUNPCKLQDQ instruction */
663static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
664
665/** Function table for the PUNPCKHBW instruction */
666static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
667/** Function table for the PUNPCKHWD instruction */
668static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
669/** Function table for the PUNPCKHDQ instruction */
670static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
671/** Function table for the PUNPCKHQDQ instruction */
672static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
673
674/** Function table for the PXOR instruction */
675static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
676/** Function table for the PCMPEQB instruction */
677static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
678/** Function table for the PCMPEQW instruction */
679static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
680/** Function table for the PCMPEQD instruction */
681static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
682
683
684#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
685/** What IEM just wrote. */
686uint8_t g_abIemWrote[256];
687/** How much IEM just wrote. */
688size_t g_cbIemWrote;
689#endif
690
691
692/*******************************************************************************
693* Internal Functions *
694*******************************************************************************/
695static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
696/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
697static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
698static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
699static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
700static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
701static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
702static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
703static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
704static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
705static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
706static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
707static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
708static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
709static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
710static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
711static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
712static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
713static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
714static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
715static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
716static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
717static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
718static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
719static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
720static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
721
722#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
723static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
724#endif
725static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
726static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
727
728
729/**
730 * Sets the pass up status.
731 *
732 * @returns VINF_SUCCESS.
733 * @param pIemCpu The per CPU IEM state of the calling thread.
734 * @param rcPassUp The pass up status. Must be informational.
735 * VINF_SUCCESS is not allowed.
736 */
737static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
738{
739 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
740
741 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
742 if (rcOldPassUp == VINF_SUCCESS)
743 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
744 /* If both are EM scheduling codes, use EM priority rules. */
745 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
746 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
747 {
748 if (rcPassUp < rcOldPassUp)
749 {
750 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
751 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
752 }
753 else
754 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 }
756 /* Override EM scheduling with specific status code. */
757 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
758 {
759 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
761 }
762 /* Don't override specific status code, first come first served. */
763 else
764 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
765 return VINF_SUCCESS;
766}
767
768
769/**
770 * Initializes the execution state.
771 *
772 * @param pIemCpu The per CPU IEM state.
773 * @param fBypassHandlers Whether to bypass access handlers.
774 */
775DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
776{
777 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
778 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
779
780#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
785 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
786 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
787 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
788 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
789#endif
790
791#ifdef VBOX_WITH_RAW_MODE_NOT_R0
792 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
793#endif
794 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
795 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
796 ? IEMMODE_64BIT
797 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
798 ? IEMMODE_32BIT
799 : IEMMODE_16BIT;
800 pIemCpu->enmCpuMode = enmMode;
801#ifdef VBOX_STRICT
802 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
803 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
804 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
805 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
806 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
807 pIemCpu->uRexReg = 127;
808 pIemCpu->uRexB = 127;
809 pIemCpu->uRexIndex = 127;
810 pIemCpu->iEffSeg = 127;
811 pIemCpu->offOpcode = 127;
812 pIemCpu->cbOpcode = 127;
813#endif
814
815 pIemCpu->cActiveMappings = 0;
816 pIemCpu->iNextMapping = 0;
817 pIemCpu->rcPassUp = VINF_SUCCESS;
818 pIemCpu->fBypassHandlers = fBypassHandlers;
819#ifdef VBOX_WITH_RAW_MODE_NOT_R0
820 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
821 && pCtx->cs.u64Base == 0
822 && pCtx->cs.u32Limit == UINT32_MAX
823 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
824 if (!pIemCpu->fInPatchCode)
825 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
826#endif
827}
828
829
830/**
831 * Initializes the decoder state.
832 *
833 * @param pIemCpu The per CPU IEM state.
834 * @param fBypassHandlers Whether to bypass access handlers.
835 */
836DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
837{
838 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
839 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
840
841#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
850#endif
851
852#ifdef VBOX_WITH_RAW_MODE_NOT_R0
853 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
854#endif
855 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
856#ifdef IEM_VERIFICATION_MODE_FULL
857 if (pIemCpu->uInjectCpl != UINT8_MAX)
858 pIemCpu->uCpl = pIemCpu->uInjectCpl;
859#endif
860 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
861 ? IEMMODE_64BIT
862 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
863 ? IEMMODE_32BIT
864 : IEMMODE_16BIT;
865 pIemCpu->enmCpuMode = enmMode;
866 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
867 pIemCpu->enmEffAddrMode = enmMode;
868 if (enmMode != IEMMODE_64BIT)
869 {
870 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
871 pIemCpu->enmEffOpSize = enmMode;
872 }
873 else
874 {
875 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
876 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
877 }
878 pIemCpu->fPrefixes = 0;
879 pIemCpu->uRexReg = 0;
880 pIemCpu->uRexB = 0;
881 pIemCpu->uRexIndex = 0;
882 pIemCpu->iEffSeg = X86_SREG_DS;
883 pIemCpu->offOpcode = 0;
884 pIemCpu->cbOpcode = 0;
885 pIemCpu->cActiveMappings = 0;
886 pIemCpu->iNextMapping = 0;
887 pIemCpu->rcPassUp = VINF_SUCCESS;
888 pIemCpu->fBypassHandlers = fBypassHandlers;
889#ifdef VBOX_WITH_RAW_MODE_NOT_R0
890 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
891 && pCtx->cs.u64Base == 0
892 && pCtx->cs.u32Limit == UINT32_MAX
893 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
894 if (!pIemCpu->fInPatchCode)
895 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
896#endif
897}
898
899
900/**
901 * Prefetches opcodes the first time, when starting execution.
902 *
903 * @returns Strict VBox status code.
904 * @param pIemCpu The IEM state.
905 * @param fBypassHandlers Whether to bypass access handlers.
906 */
907static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
908{
909#ifdef IEM_VERIFICATION_MODE_FULL
910 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
911#endif
912 iemInitDecoder(pIemCpu, fBypassHandlers);
913
914 /*
915 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
916 *
917 * First translate CS:rIP to a physical address.
918 */
919 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
920 uint32_t cbToTryRead;
921 RTGCPTR GCPtrPC;
922 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
923 {
924 cbToTryRead = PAGE_SIZE;
925 GCPtrPC = pCtx->rip;
926 if (!IEM_IS_CANONICAL(GCPtrPC))
927 return iemRaiseGeneralProtectionFault0(pIemCpu);
928 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
929 }
930 else
931 {
932 uint32_t GCPtrPC32 = pCtx->eip;
933 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
934 if (GCPtrPC32 > pCtx->cs.u32Limit)
935 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
936 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
937 if (!cbToTryRead) /* overflowed */
938 {
939 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
940 cbToTryRead = UINT32_MAX;
941 }
942 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
943 }
944
945#ifdef VBOX_WITH_RAW_MODE_NOT_R0
946 /* Allow interpretation of patch manager code blocks since they can for
947 instance throw #PFs for perfectly good reasons. */
948 if (pIemCpu->fInPatchCode)
949 {
950 size_t cbRead = 0;
951 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
952 AssertRCReturn(rc, rc);
953 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
954 return VINF_SUCCESS;
955 }
956#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
957
958 RTGCPHYS GCPhys;
959 uint64_t fFlags;
960 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
961 if (RT_FAILURE(rc))
962 {
963 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
964 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
965 }
966 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
967 {
968 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
969 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
970 }
971 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
972 {
973 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
974 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
975 }
976 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
977 /** @todo Check reserved bits and such stuff. PGM is better at doing
978 * that, so do it when implementing the guest virtual address
979 * TLB... */
980
981#ifdef IEM_VERIFICATION_MODE_FULL
982 /*
983 * Optimistic optimization: Use unconsumed opcode bytes from the previous
984 * instruction.
985 */
986 /** @todo optimize this differently by not using PGMPhysRead. */
987 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
988 pIemCpu->GCPhysOpcodes = GCPhys;
989 if ( offPrevOpcodes < cbOldOpcodes
990 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
991 {
992 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
993 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
994 pIemCpu->cbOpcode = cbNew;
995 return VINF_SUCCESS;
996 }
997#endif
998
999 /*
1000 * Read the bytes at this address.
1001 */
1002 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1003 if (cbToTryRead > cbLeftOnPage)
1004 cbToTryRead = cbLeftOnPage;
1005 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1006 cbToTryRead = sizeof(pIemCpu->abOpcode);
1007 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
1008 * doing that. */
1009 if (!pIemCpu->fBypassHandlers)
1010 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
1011 else
1012 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
1013 if (rc != VINF_SUCCESS)
1014 {
1015 /** @todo status code handling */
1016 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1017 GCPtrPC, GCPhys, rc, cbToTryRead));
1018 return rc;
1019 }
1020 pIemCpu->cbOpcode = cbToTryRead;
1021
1022 return VINF_SUCCESS;
1023}
1024
1025
1026/**
1027 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1028 * exception if it fails.
1029 *
1030 * @returns Strict VBox status code.
1031 * @param pIemCpu The IEM state.
1032 * @param cbMin The minimum number of bytes relative to offOpcode
1033 * that must be read.
1034 */
1035static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1036{
1037 /*
1038 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1039 *
1040 * First translate CS:rIP to a physical address.
1041 */
1042 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1043 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1044 uint32_t cbToTryRead;
1045 RTGCPTR GCPtrNext;
1046 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1047 {
1048 cbToTryRead = PAGE_SIZE;
1049 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1050 if (!IEM_IS_CANONICAL(GCPtrNext))
1051 return iemRaiseGeneralProtectionFault0(pIemCpu);
1052 }
1053 else
1054 {
1055 uint32_t GCPtrNext32 = pCtx->eip;
1056 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1057 GCPtrNext32 += pIemCpu->cbOpcode;
1058 if (GCPtrNext32 > pCtx->cs.u32Limit)
1059 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1060 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1061 if (!cbToTryRead) /* overflowed */
1062 {
1063 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1064 cbToTryRead = UINT32_MAX;
1065 /** @todo check out wrapping around the code segment. */
1066 }
1067 if (cbToTryRead < cbMin - cbLeft)
1068 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1069 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
1070 }
1071
1072 /* Only read up to the end of the page, and make sure we don't read more
1073 than the opcode buffer can hold. */
1074 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1075 if (cbToTryRead > cbLeftOnPage)
1076 cbToTryRead = cbLeftOnPage;
1077 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1078 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1079 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1080
1081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1082 /* Allow interpretation of patch manager code blocks since they can for
1083 instance throw #PFs for perfectly good reasons. */
1084 if (pIemCpu->fInPatchCode)
1085 {
1086 size_t cbRead = 0;
1087 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1088 AssertRCReturn(rc, rc);
1089 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1090 return VINF_SUCCESS;
1091 }
1092#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1093
1094 RTGCPHYS GCPhys;
1095 uint64_t fFlags;
1096 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1097 if (RT_FAILURE(rc))
1098 {
1099 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1100 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1101 }
1102 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1103 {
1104 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1105 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1106 }
1107 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1108 {
1109 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1110 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1111 }
1112 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1113 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1114 /** @todo Check reserved bits and such stuff. PGM is better at doing
1115 * that, so do it when implementing the guest virtual address
1116 * TLB... */
1117
1118 /*
1119 * Read the bytes at this address.
1120 */
1121 if (!pIemCpu->fBypassHandlers)
1122 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1123 else
1124 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1125 if (rc != VINF_SUCCESS)
1126 {
1127 /** @todo status code handling */
1128 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1129 return rc;
1130 }
1131 pIemCpu->cbOpcode += cbToTryRead;
1132 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1133
1134 return VINF_SUCCESS;
1135}
1136
1137
1138/**
1139 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1140 *
1141 * @returns Strict VBox status code.
1142 * @param pIemCpu The IEM state.
1143 * @param pb Where to return the opcode byte.
1144 */
1145DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1146{
1147 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1148 if (rcStrict == VINF_SUCCESS)
1149 {
1150 uint8_t offOpcode = pIemCpu->offOpcode;
1151 *pb = pIemCpu->abOpcode[offOpcode];
1152 pIemCpu->offOpcode = offOpcode + 1;
1153 }
1154 else
1155 *pb = 0;
1156 return rcStrict;
1157}
1158
1159
1160/**
1161 * Fetches the next opcode byte.
1162 *
1163 * @returns Strict VBox status code.
1164 * @param pIemCpu The IEM state.
1165 * @param pu8 Where to return the opcode byte.
1166 */
1167DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1168{
1169 uint8_t const offOpcode = pIemCpu->offOpcode;
1170 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1171 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1172
1173 *pu8 = pIemCpu->abOpcode[offOpcode];
1174 pIemCpu->offOpcode = offOpcode + 1;
1175 return VINF_SUCCESS;
1176}
1177
1178
1179/**
1180 * Fetches the next opcode byte, returns automatically on failure.
1181 *
1182 * @param a_pu8 Where to return the opcode byte.
1183 * @remark Implicitly references pIemCpu.
1184 */
1185#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1186 do \
1187 { \
1188 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1189 if (rcStrict2 != VINF_SUCCESS) \
1190 return rcStrict2; \
1191 } while (0)
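A hypothetical decoder body (illustration only) showing the early-return behaviour of the macro above: on a fetch failure it returns the strict status code on the caller's behalf.

/* Illustration only; the decoder name and log text are invented. */
FNIEMOP_DEF(iemOp_ExampleFetchIllustration)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);   /* may 'return rcStrict2' here on failure */
    Log4(("example: immediate byte %#04x\n", bImm));
    return VINF_SUCCESS;
}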
1192
1193
1194/**
1195 * Fetches the next signed byte from the opcode stream.
1196 *
1197 * @returns Strict VBox status code.
1198 * @param pIemCpu The IEM state.
1199 * @param pi8 Where to return the signed byte.
1200 */
1201DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1202{
1203 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1204}
1205
1206
1207/**
1208 * Fetches the next signed byte from the opcode stream, returning automatically
1209 * on failure.
1210 *
1211 * @param a_pi8 Where to return the signed byte.
1212 * @remark Implicitly references pIemCpu.
1213 */
1214#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1215 do \
1216 { \
1217 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1218 if (rcStrict2 != VINF_SUCCESS) \
1219 return rcStrict2; \
1220 } while (0)
1221
1222
1223/**
1224 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1225 *
1226 * @returns Strict VBox status code.
1227 * @param pIemCpu The IEM state.
1228 * @param pu16 Where to return the opcode word.
1229 */
1230DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1231{
1232 uint8_t u8;
1233 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1234 if (rcStrict == VINF_SUCCESS)
1235 *pu16 = (int8_t)u8;
1236 return rcStrict;
1237}
1238
1239
1240/**
1241 * Fetches the next signed byte from the opcode stream, extending it to
1242 * unsigned 16-bit.
1243 *
1244 * @returns Strict VBox status code.
1245 * @param pIemCpu The IEM state.
1246 * @param pu16 Where to return the unsigned word.
1247 */
1248DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1249{
1250 uint8_t const offOpcode = pIemCpu->offOpcode;
1251 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1252 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1253
1254 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1255 pIemCpu->offOpcode = offOpcode + 1;
1256 return VINF_SUCCESS;
1257}
1258
1259
1260/**
1261 * Fetches the next signed byte from the opcode stream, sign-extending it to
1262 * a word, and returns automatically on failure.
1263 *
1264 * @param a_pu16 Where to return the word.
1265 * @remark Implicitly references pIemCpu.
1266 */
1267#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1268 do \
1269 { \
1270 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1271 if (rcStrict2 != VINF_SUCCESS) \
1272 return rcStrict2; \
1273 } while (0)
1274
1275
1276/**
1277 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1278 *
1279 * @returns Strict VBox status code.
1280 * @param pIemCpu The IEM state.
1281 * @param pu32 Where to return the opcode dword.
1282 */
1283DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1284{
1285 uint8_t u8;
1286 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1287 if (rcStrict == VINF_SUCCESS)
1288 *pu32 = (int8_t)u8;
1289 return rcStrict;
1290}
1291
1292
1293/**
1294 * Fetches the next signed byte from the opcode stream, extending it to
1295 * unsigned 32-bit.
1296 *
1297 * @returns Strict VBox status code.
1298 * @param pIemCpu The IEM state.
1299 * @param pu32 Where to return the unsigned dword.
1300 */
1301DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1302{
1303 uint8_t const offOpcode = pIemCpu->offOpcode;
1304 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1305 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1306
1307 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1308 pIemCpu->offOpcode = offOpcode + 1;
1309 return VINF_SUCCESS;
1310}
1311
1312
1313/**
1314 * Fetches the next signed byte from the opcode stream, sign-extending it to
1315 * a double word, and returns automatically on failure.
1316 *
1317 * @param a_pu32 Where to return the double word.
1318 * @remark Implicitly references pIemCpu.
1319 */
1320#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1321 do \
1322 { \
1323 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1324 if (rcStrict2 != VINF_SUCCESS) \
1325 return rcStrict2; \
1326 } while (0)
1327
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pIemCpu The IEM state.
1334 * @param pu64 Where to return the opcode qword.
1335 */
1336DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1337{
1338 uint8_t u8;
1339 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1340 if (rcStrict == VINF_SUCCESS)
1341 *pu64 = (int8_t)u8;
1342 return rcStrict;
1343}
1344
1345
1346/**
1347 * Fetches the next signed byte from the opcode stream, extending it to
1348 * unsigned 64-bit.
1349 *
1350 * @returns Strict VBox status code.
1351 * @param pIemCpu The IEM state.
1352 * @param pu64 Where to return the unsigned qword.
1353 */
1354DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1355{
1356 uint8_t const offOpcode = pIemCpu->offOpcode;
1357 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1358 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1359
1360 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1361 pIemCpu->offOpcode = offOpcode + 1;
1362 return VINF_SUCCESS;
1363}
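A quick worked example of the sign-extending cast used in these helpers (illustration only):

/* (int8_t)0x80 assigned to a uint64_t yields 0xFFFFFFFFFFFFFF80,
 * while (int8_t)0x7F yields 0x000000000000007F. */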
1364
1365
1366/**
1367 * Fetches the next signed byte from the opcode stream, sign-extending it to
1368 * a quad word, and returns automatically on failure.
1369 *
1370 * @param a_pu64 Where to return the quad word.
1371 * @remark Implicitly references pIemCpu.
1372 */
1373#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1374 do \
1375 { \
1376 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1377 if (rcStrict2 != VINF_SUCCESS) \
1378 return rcStrict2; \
1379 } while (0)
1380
1381
1382/**
1383 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1384 *
1385 * @returns Strict VBox status code.
1386 * @param pIemCpu The IEM state.
1387 * @param pu16 Where to return the opcode word.
1388 */
1389DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1390{
1391 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1392 if (rcStrict == VINF_SUCCESS)
1393 {
1394 uint8_t offOpcode = pIemCpu->offOpcode;
1395 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1396 pIemCpu->offOpcode = offOpcode + 2;
1397 }
1398 else
1399 *pu16 = 0;
1400 return rcStrict;
1401}
1402
1403
1404/**
1405 * Fetches the next opcode word.
1406 *
1407 * @returns Strict VBox status code.
1408 * @param pIemCpu The IEM state.
1409 * @param pu16 Where to return the opcode word.
1410 */
1411DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1412{
1413 uint8_t const offOpcode = pIemCpu->offOpcode;
1414 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1415 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1416
1417 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1418 pIemCpu->offOpcode = offOpcode + 2;
1419 return VINF_SUCCESS;
1420}
1421
1422
1423/**
1424 * Fetches the next opcode word, returns automatically on failure.
1425 *
1426 * @param a_pu16 Where to return the opcode word.
1427 * @remark Implicitly references pIemCpu.
1428 */
1429#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1430 do \
1431 { \
1432 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1433 if (rcStrict2 != VINF_SUCCESS) \
1434 return rcStrict2; \
1435 } while (0)
1436
1437
1438/**
1439 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1440 *
1441 * @returns Strict VBox status code.
1442 * @param pIemCpu The IEM state.
1443 * @param pu32 Where to return the opcode double word.
1444 */
1445DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1446{
1447 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1448 if (rcStrict == VINF_SUCCESS)
1449 {
1450 uint8_t offOpcode = pIemCpu->offOpcode;
1451 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1452 pIemCpu->offOpcode = offOpcode + 2;
1453 }
1454 else
1455 *pu32 = 0;
1456 return rcStrict;
1457}
1458
1459
1460/**
1461 * Fetches the next opcode word, zero extending it to a double word.
1462 *
1463 * @returns Strict VBox status code.
1464 * @param pIemCpu The IEM state.
1465 * @param pu32 Where to return the opcode double word.
1466 */
1467DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1468{
1469 uint8_t const offOpcode = pIemCpu->offOpcode;
1470 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1471 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1472
1473 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1474 pIemCpu->offOpcode = offOpcode + 2;
1475 return VINF_SUCCESS;
1476}
1477
1478
1479/**
1480 * Fetches the next opcode word and zero extends it to a double word, returns
1481 * automatically on failure.
1482 *
1483 * @param a_pu32 Where to return the opcode double word.
1484 * @remark Implicitly references pIemCpu.
1485 */
1486#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1487 do \
1488 { \
1489 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1490 if (rcStrict2 != VINF_SUCCESS) \
1491 return rcStrict2; \
1492 } while (0)
1493
1494
1495/**
1496 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1497 *
1498 * @returns Strict VBox status code.
1499 * @param pIemCpu The IEM state.
1500 * @param pu64 Where to return the opcode quad word.
1501 */
1502DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1503{
1504 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1505 if (rcStrict == VINF_SUCCESS)
1506 {
1507 uint8_t offOpcode = pIemCpu->offOpcode;
1508 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1509 pIemCpu->offOpcode = offOpcode + 2;
1510 }
1511 else
1512 *pu64 = 0;
1513 return rcStrict;
1514}
1515
1516
1517/**
1518 * Fetches the next opcode word, zero extending it to a quad word.
1519 *
1520 * @returns Strict VBox status code.
1521 * @param pIemCpu The IEM state.
1522 * @param pu64 Where to return the opcode quad word.
1523 */
1524DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1525{
1526 uint8_t const offOpcode = pIemCpu->offOpcode;
1527 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1528 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1529
1530 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1531 pIemCpu->offOpcode = offOpcode + 2;
1532 return VINF_SUCCESS;
1533}
1534
1535
1536/**
1537 * Fetches the next opcode word and zero extends it to a quad word, returns
1538 * automatically on failure.
1539 *
1540 * @param a_pu64 Where to return the opcode quad word.
1541 * @remark Implicitly references pIemCpu.
1542 */
1543#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1544 do \
1545 { \
1546 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1547 if (rcStrict2 != VINF_SUCCESS) \
1548 return rcStrict2; \
1549 } while (0)
1550
1551
1552/**
1553 * Fetches the next signed word from the opcode stream.
1554 *
1555 * @returns Strict VBox status code.
1556 * @param pIemCpu The IEM state.
1557 * @param pi16 Where to return the signed word.
1558 */
1559DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1560{
1561 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1562}
1563
1564
1565/**
1566 * Fetches the next signed word from the opcode stream, returning automatically
1567 * on failure.
1568 *
1569 * @param a_pi16 Where to return the signed word.
1570 * @remark Implicitly references pIemCpu.
1571 */
1572#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1573 do \
1574 { \
1575 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1576 if (rcStrict2 != VINF_SUCCESS) \
1577 return rcStrict2; \
1578 } while (0)
1579
1580
1581/**
1582 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1583 *
1584 * @returns Strict VBox status code.
1585 * @param pIemCpu The IEM state.
1586 * @param pu32 Where to return the opcode dword.
1587 */
1588DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1589{
1590 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1591 if (rcStrict == VINF_SUCCESS)
1592 {
1593 uint8_t offOpcode = pIemCpu->offOpcode;
1594 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1595 pIemCpu->abOpcode[offOpcode + 1],
1596 pIemCpu->abOpcode[offOpcode + 2],
1597 pIemCpu->abOpcode[offOpcode + 3]);
1598 pIemCpu->offOpcode = offOpcode + 4;
1599 }
1600 else
1601 *pu32 = 0;
1602 return rcStrict;
1603}
1604
1605
1606/**
1607 * Fetches the next opcode dword.
1608 *
1609 * @returns Strict VBox status code.
1610 * @param pIemCpu The IEM state.
1611 * @param pu32 Where to return the opcode double word.
1612 */
1613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1614{
1615 uint8_t const offOpcode = pIemCpu->offOpcode;
1616 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1617 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1618
1619 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1620 pIemCpu->abOpcode[offOpcode + 1],
1621 pIemCpu->abOpcode[offOpcode + 2],
1622 pIemCpu->abOpcode[offOpcode + 3]);
1623 pIemCpu->offOpcode = offOpcode + 4;
1624 return VINF_SUCCESS;
1625}
1626
1627
1628/**
1629 * Fetches the next opcode dword, returns automatically on failure.
1630 *
1631 * @param a_pu32 Where to return the opcode dword.
1632 * @remark Implicitly references pIemCpu.
1633 */
1634#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1635 do \
1636 { \
1637 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1638 if (rcStrict2 != VINF_SUCCESS) \
1639 return rcStrict2; \
1640 } while (0)
1641
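/*
 * Usage sketch (illustrative only, not wired into any decoder table): a
 * decoder helper that needs a 32-bit immediate just invokes the macro and
 * lets it return the strict status code to its caller on a fetch failure.
 * The helper name below is hypothetical.
 */
#if 0 /* example */
static VBOXSTRICTRC iemExampleFetchImm32(PIEMCPU pIemCpu, uint32_t *pu32Imm)
{
    IEM_OPCODE_GET_NEXT_U32(pu32Imm);   /* returns rcStrict2 to our caller on failure */
    return VINF_SUCCESS;
}
#endif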
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pIemCpu The IEM state.
1648 * @param pu64 Where to return the opcode quad word.
1649 */
1650DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pIemCpu->offOpcode;
1656 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1657 pIemCpu->abOpcode[offOpcode + 1],
1658 pIemCpu->abOpcode[offOpcode + 2],
1659 pIemCpu->abOpcode[offOpcode + 3]);
1660 pIemCpu->offOpcode = offOpcode + 4;
1661 }
1662 else
1663 *pu64 = 0;
1664 return rcStrict;
1665}
1666
1667
1668/**
1669 * Fetches the next opcode dword, zero extending it to a quad word.
1670 *
1671 * @returns Strict VBox status code.
1672 * @param pIemCpu The IEM state.
1673 * @param pu64 Where to return the opcode quad word.
1674 */
1675DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1676{
1677 uint8_t const offOpcode = pIemCpu->offOpcode;
1678 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1679 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1680
1681 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1682 pIemCpu->abOpcode[offOpcode + 1],
1683 pIemCpu->abOpcode[offOpcode + 2],
1684 pIemCpu->abOpcode[offOpcode + 3]);
1685 pIemCpu->offOpcode = offOpcode + 4;
1686 return VINF_SUCCESS;
1687}
1688
1689
1690/**
1691 * Fetches the next opcode dword and zero extends it to a quad word, returns
1692 * automatically on failure.
1693 *
1694 * @param a_pu64 Where to return the opcode quad word.
1695 * @remark Implicitly references pIemCpu.
1696 */
1697#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1698 do \
1699 { \
1700 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1701 if (rcStrict2 != VINF_SUCCESS) \
1702 return rcStrict2; \
1703 } while (0)
1704
1705
1706/**
1707 * Fetches the next signed double word from the opcode stream.
1708 *
1709 * @returns Strict VBox status code.
1710 * @param pIemCpu The IEM state.
1711 * @param pi32 Where to return the signed double word.
1712 */
1713DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1714{
1715 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1716}
1717
1718/**
1719 * Fetches the next signed double word from the opcode stream, returning
1720 * automatically on failure.
1721 *
1722 * @param a_pi32 Where to return the signed double word.
1723 * @remark Implicitly references pIemCpu.
1724 */
1725#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1726 do \
1727 { \
1728 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1729 if (rcStrict2 != VINF_SUCCESS) \
1730 return rcStrict2; \
1731 } while (0)
1732
1733
1734/**
1735 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1736 *
1737 * @returns Strict VBox status code.
1738 * @param pIemCpu The IEM state.
1739 * @param pu64 Where to return the opcode qword.
1740 */
1741DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1742{
1743 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1744 if (rcStrict == VINF_SUCCESS)
1745 {
1746 uint8_t offOpcode = pIemCpu->offOpcode;
1747 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1748 pIemCpu->abOpcode[offOpcode + 1],
1749 pIemCpu->abOpcode[offOpcode + 2],
1750 pIemCpu->abOpcode[offOpcode + 3]);
1751 pIemCpu->offOpcode = offOpcode + 4;
1752 }
1753 else
1754 *pu64 = 0;
1755 return rcStrict;
1756}
1757
1758
1759/**
1760 * Fetches the next opcode dword, sign extending it into a quad word.
1761 *
1762 * @returns Strict VBox status code.
1763 * @param pIemCpu The IEM state.
1764 * @param pu64 Where to return the opcode quad word.
1765 */
1766DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1767{
1768 uint8_t const offOpcode = pIemCpu->offOpcode;
1769 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1770 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1771
1772 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1773 pIemCpu->abOpcode[offOpcode + 1],
1774 pIemCpu->abOpcode[offOpcode + 2],
1775 pIemCpu->abOpcode[offOpcode + 3]);
1776 *pu64 = i32;
1777 pIemCpu->offOpcode = offOpcode + 4;
1778 return VINF_SUCCESS;
1779}
1780
1781
1782/**
1783 * Fetches the next opcode double word and sign extends it to a quad word,
1784 * returns automatically on failure.
1785 *
1786 * @param a_pu64 Where to return the opcode quad word.
1787 * @remark Implicitly references pIemCpu.
1788 */
1789#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1790 do \
1791 { \
1792 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1793 if (rcStrict2 != VINF_SUCCESS) \
1794 return rcStrict2; \
1795 } while (0)
1796
1797
1798/**
1799 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1800 *
1801 * @returns Strict VBox status code.
1802 * @param pIemCpu The IEM state.
1803 * @param pu64 Where to return the opcode qword.
1804 */
1805DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1806{
1807 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1808 if (rcStrict == VINF_SUCCESS)
1809 {
1810 uint8_t offOpcode = pIemCpu->offOpcode;
1811 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1812 pIemCpu->abOpcode[offOpcode + 1],
1813 pIemCpu->abOpcode[offOpcode + 2],
1814 pIemCpu->abOpcode[offOpcode + 3],
1815 pIemCpu->abOpcode[offOpcode + 4],
1816 pIemCpu->abOpcode[offOpcode + 5],
1817 pIemCpu->abOpcode[offOpcode + 6],
1818 pIemCpu->abOpcode[offOpcode + 7]);
1819 pIemCpu->offOpcode = offOpcode + 8;
1820 }
1821 else
1822 *pu64 = 0;
1823 return rcStrict;
1824}
1825
1826
1827/**
1828 * Fetches the next opcode qword.
1829 *
1830 * @returns Strict VBox status code.
1831 * @param pIemCpu The IEM state.
1832 * @param pu64 Where to return the opcode qword.
1833 */
1834DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1835{
1836 uint8_t const offOpcode = pIemCpu->offOpcode;
1837 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1838 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1839
1840 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1841 pIemCpu->abOpcode[offOpcode + 1],
1842 pIemCpu->abOpcode[offOpcode + 2],
1843 pIemCpu->abOpcode[offOpcode + 3],
1844 pIemCpu->abOpcode[offOpcode + 4],
1845 pIemCpu->abOpcode[offOpcode + 5],
1846 pIemCpu->abOpcode[offOpcode + 6],
1847 pIemCpu->abOpcode[offOpcode + 7]);
1848 pIemCpu->offOpcode = offOpcode + 8;
1849 return VINF_SUCCESS;
1850}
1851
1852
1853/**
1854 * Fetches the next opcode quad word, returns automatically on failure.
1855 *
1856 * @param a_pu64 Where to return the opcode quad word.
1857 * @remark Implicitly references pIemCpu.
1858 */
1859#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1860 do \
1861 { \
1862 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1863 if (rcStrict2 != VINF_SUCCESS) \
1864 return rcStrict2; \
1865 } while (0)
1866
1867
1868/** @name Misc Worker Functions.
1869 * @{
1870 */
1871
1872
1873/**
1874 * Validates a new SS segment.
1875 *
1876 * @returns VBox strict status code.
1877 * @param pIemCpu The IEM per CPU instance data.
1878 * @param pCtx The CPU context.
1879 * @param NewSS The new SS selector.
1880 * @param uCpl The CPL to load the stack for.
1881 * @param pDesc Where to return the descriptor.
1882 */
1883static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1884{
1885 NOREF(pCtx);
1886
1887 /* Null selectors are not allowed (we're not called for dispatching
1888 interrupts with SS=0 in long mode). */
1889 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1890 {
1891 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1892 return iemRaiseGeneralProtectionFault0(pIemCpu);
1893 }
1894
1895 /*
1896 * Read the descriptor.
1897 */
1898 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 /*
1903 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1904 */
1905 if (!pDesc->Legacy.Gen.u1DescType)
1906 {
1907 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1908 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1909 }
1910
1911 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1912 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1913 {
1914 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1915 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1916 }
1917 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1918 if ((NewSS & X86_SEL_RPL) != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1921 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1922 }
1923 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1924 {
1925 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1926 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1927 }
1928
1929 /* Is it there? */
1930 /** @todo testcase: Is this checked before the canonical / limit check below? */
1931 if (!pDesc->Legacy.Gen.u1Present)
1932 {
1933 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1934 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1935 }
1936
1937 return VINF_SUCCESS;
1938}
1939
1940
1941/**
1942 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1943 * not.
1944 *
1945 * @param a_pIemCpu The IEM per CPU data.
1946 * @param a_pCtx The CPU context.
1947 */
1948#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1949# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1950 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1951 ? (a_pCtx)->eflags.u \
1952 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1953#else
1954# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1955 ( (a_pCtx)->eflags.u )
1956#endif
1957
1958/**
1959 * Updates the EFLAGS in the correct manner wrt. PATM.
1960 *
1961 * @param a_pIemCpu The IEM per CPU data.
1962 * @param a_pCtx The CPU context.
1963 */
1964#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1965# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1966 do { \
1967 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1968 (a_pCtx)->eflags.u = (a_fEfl); \
1969 else \
1970 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1971 } while (0)
1972#else
1973# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1974 do { \
1975 (a_pCtx)->eflags.u = (a_fEfl); \
1976 } while (0)
1977#endif
1978
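/*
 * Usage sketch (illustrative; the real-mode dispatcher further down does
 * exactly this): EFLAGS is always read and written through these wrappers so
 * the PATM/raw-mode managed bits stay in sync.
 */
#if 0 /* example */
    uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
    fEfl &= ~X86_EFL_IF;                    /* e.g. mask interrupts for the handler */
    IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
#endif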
1979
1980/** @} */
1981
1982/** @name Raising Exceptions.
1983 *
1984 * @{
1985 */
1986
1987/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1988 * @{ */
1989/** CPU exception. */
1990#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1991/** External interrupt (from PIC, APIC, whatever). */
1992#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1993/** Software interrupt (int or into, not bound).
1994 * Returns to the following instruction. */
1995#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1996/** Takes an error code. */
1997#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1998/** Takes a CR2. */
1999#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2000/** Generated by the breakpoint instruction. */
2001#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2002/** @} */
2003
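/*
 * Usage sketch (illustrative): a software interrupt dispatch passes
 * IEM_XCPT_FLAGS_T_SOFT_INT together with the instruction length so the
 * pushed return address points at the following instruction, while a CPU
 * exception carrying an error code combines IEM_XCPT_FLAGS_T_CPU_XCPT with
 * IEM_XCPT_FLAGS_ERR (see the raise helpers further down).
 */
#if 0 /* example */
    return iemRaiseXcptOrInt(pIemCpu, cbInstr, u8Vector, IEM_XCPT_FLAGS_T_SOFT_INT, 0 /*uErr*/, 0 /*uCr2*/);
#endif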
2004
2005/**
2006 * Loads the specified stack far pointer from the TSS.
2007 *
2008 * @returns VBox strict status code.
2009 * @param pIemCpu The IEM per CPU instance data.
2010 * @param pCtx The CPU context.
2011 * @param uCpl The CPL to load the stack for.
2012 * @param pSelSS Where to return the new stack segment.
2013 * @param puEsp Where to return the new stack pointer.
2014 */
2015static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2016 PRTSEL pSelSS, uint32_t *puEsp)
2017{
2018 VBOXSTRICTRC rcStrict;
2019 Assert(uCpl < 4);
2020 *puEsp = 0; /* make gcc happy */
2021 *pSelSS = 0; /* make gcc happy */
2022
2023 switch (pCtx->tr.Attr.n.u4Type)
2024 {
2025 /*
2026 * 16-bit TSS (X86TSS16).
2027 */
2028 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2029 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2030 {
2031 uint32_t off = uCpl * 4 + 2;
2032 if (off + 4 > pCtx->tr.u32Limit)
2033 {
2034 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2035 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2036 }
2037
2038 uint32_t u32Tmp = 0; /* gcc maybe... */
2039 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2040 if (rcStrict == VINF_SUCCESS)
2041 {
2042 *puEsp = RT_LOWORD(u32Tmp);
2043 *pSelSS = RT_HIWORD(u32Tmp);
2044 return VINF_SUCCESS;
2045 }
2046 break;
2047 }
2048
2049 /*
2050 * 32-bit TSS (X86TSS32).
2051 */
2052 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2053 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2054 {
2055 uint32_t off = uCpl * 8 + 4;
2056 if (off + 7 > pCtx->tr.u32Limit)
2057 {
2058 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2059 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2060 }
2061
2062 uint64_t u64Tmp;
2063 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2064 if (rcStrict == VINF_SUCCESS)
2065 {
2066 *puEsp = u64Tmp & UINT32_MAX;
2067 *pSelSS = (RTSEL)(u64Tmp >> 32);
2068 return VINF_SUCCESS;
2069 }
2070 break;
2071 }
2072
2073 default:
2074 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2075 }
2076 return rcStrict;
2077}
2078
2079
2080/**
2081 * Loads the specified stack pointer from the 64-bit TSS.
2082 *
2083 * @returns VBox strict status code.
2084 * @param pIemCpu The IEM per CPU instance data.
2085 * @param pCtx The CPU context.
2086 * @param uCpl The CPL to load the stack for.
2087 * @param uIst The interrupt stack table index; 0 means use uCpl.
2088 * @param puRsp Where to return the new stack pointer.
2089 */
2090static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2091 uint64_t *puRsp)
2092{
2093 Assert(uCpl < 4);
2094 Assert(uIst < 8);
2095 *puRsp = 0; /* make gcc happy */
2096
2097 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2098
2099 uint32_t off;
2100 if (uIst)
2101 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2102 else
2103 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
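    /* E.g. uIst=1 selects X86TSS64::ist1, while uIst=0 with uCpl=0 selects
       X86TSS64::rsp0. */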
2104 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2105 {
2106 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2107 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2108 }
2109
2110 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2111}
2112
2113
2114/**
2115 * Adjust the CPU state according to the exception being raised.
2116 *
2117 * @param pCtx The CPU context.
2118 * @param u8Vector The exception that has been raised.
2119 */
2120DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2121{
2122 switch (u8Vector)
2123 {
2124 case X86_XCPT_DB:
2125 pCtx->dr[7] &= ~X86_DR7_GD;
2126 break;
2127 /** @todo Read the AMD and Intel exception reference... */
2128 }
2129}
2130
2131
2132/**
2133 * Implements exceptions and interrupts for real mode.
2134 *
2135 * @returns VBox strict status code.
2136 * @param pIemCpu The IEM per CPU instance data.
2137 * @param pCtx The CPU context.
2138 * @param cbInstr The number of bytes to offset rIP by in the return
2139 * address.
2140 * @param u8Vector The interrupt / exception vector number.
2141 * @param fFlags The flags.
2142 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2143 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2144 */
2145static VBOXSTRICTRC
2146iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2147 PCPUMCTX pCtx,
2148 uint8_t cbInstr,
2149 uint8_t u8Vector,
2150 uint32_t fFlags,
2151 uint16_t uErr,
2152 uint64_t uCr2)
2153{
2154 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2155 NOREF(uErr); NOREF(uCr2);
2156
2157 /*
2158 * Read the IDT entry.
2159 */
2160 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2161 {
2162 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2163 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2164 }
2165 RTFAR16 Idte;
2166 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2167 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2168 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2169 return rcStrict;
2170
2171 /*
2172 * Push the stack frame.
2173 */
2174 uint16_t *pu16Frame;
2175 uint64_t uNewRsp;
2176 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2177 if (rcStrict != VINF_SUCCESS)
2178 return rcStrict;
2179
2180 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
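    /* Real-mode exception frame (3 words): [2]=FLAGS, [1]=CS, [0]=return IP;
       the IP is advanced past the instruction only for software interrupts. */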
2181 pu16Frame[2] = (uint16_t)fEfl;
2182 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2183 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2184 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2185 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2186 return rcStrict;
2187
2188 /*
2189 * Load the vector address into cs:ip and make exception specific state
2190 * adjustments.
2191 */
2192 pCtx->cs.Sel = Idte.sel;
2193 pCtx->cs.ValidSel = Idte.sel;
2194 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2195 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2196 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2197 pCtx->rip = Idte.off;
2198 fEfl &= ~X86_EFL_IF;
2199 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2200
2201 /** @todo do we actually do this in real mode? */
2202 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2203 iemRaiseXcptAdjustState(pCtx, u8Vector);
2204
2205 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2206}
2207
2208
2209/**
2210 * Implements exceptions and interrupts for protected mode.
2211 *
2212 * @returns VBox strict status code.
2213 * @param pIemCpu The IEM per CPU instance data.
2214 * @param pCtx The CPU context.
2215 * @param cbInstr The number of bytes to offset rIP by in the return
2216 * address.
2217 * @param u8Vector The interrupt / exception vector number.
2218 * @param fFlags The flags.
2219 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2220 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2221 */
2222static VBOXSTRICTRC
2223iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2224 PCPUMCTX pCtx,
2225 uint8_t cbInstr,
2226 uint8_t u8Vector,
2227 uint32_t fFlags,
2228 uint16_t uErr,
2229 uint64_t uCr2)
2230{
2231 NOREF(cbInstr);
2232
2233 /*
2234 * Read the IDT entry.
2235 */
2236 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2237 {
2238 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2239 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2240 }
2241 X86DESC Idte;
2242 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2243 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2244 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2245 return rcStrict;
2246 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2247 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2248 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2249
2250 /*
2251 * Check the descriptor type, DPL and such.
2252 * ASSUMES this is done in the same order as described for call-gate calls.
2253 */
2254 if (Idte.Gate.u1DescType)
2255 {
2256 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2257 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2258 }
2259 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2260 switch (Idte.Gate.u4Type)
2261 {
2262 case X86_SEL_TYPE_SYS_UNDEFINED:
2263 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2264 case X86_SEL_TYPE_SYS_LDT:
2265 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2266 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2267 case X86_SEL_TYPE_SYS_UNDEFINED2:
2268 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2269 case X86_SEL_TYPE_SYS_UNDEFINED3:
2270 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2271 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2272 case X86_SEL_TYPE_SYS_UNDEFINED4:
2273 {
2274 /** @todo check what actually happens when the type is wrong...
2275 * esp. call gates. */
2276 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2277 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2278 }
2279
2280 case X86_SEL_TYPE_SYS_286_INT_GATE:
2281 case X86_SEL_TYPE_SYS_386_INT_GATE:
2282 fEflToClear |= X86_EFL_IF;
2283 break;
2284
2285 case X86_SEL_TYPE_SYS_TASK_GATE:
2286 /** @todo task gates. */
2287 AssertFailedReturn(VERR_NOT_SUPPORTED);
2288
2289 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2290 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2291 break;
2292
2293 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2294 }
2295
2296 /* Check DPL against CPL if applicable. */
2297 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2298 {
2299 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2300 {
2301 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2302 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2303 }
2304 }
2305
2306 /* Is it there? */
2307 if (!Idte.Gate.u1Present)
2308 {
2309 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2310 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2311 }
2312
2313 /* A null CS is bad. */
2314 RTSEL NewCS = Idte.Gate.u16Sel;
2315 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2316 {
2317 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2318 return iemRaiseGeneralProtectionFault0(pIemCpu);
2319 }
2320
2321 /* Fetch the descriptor for the new CS. */
2322 IEMSELDESC DescCS;
2323 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2324 if (rcStrict != VINF_SUCCESS)
2325 {
2326 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2327 return rcStrict;
2328 }
2329
2330 /* Must be a code segment. */
2331 if (!DescCS.Legacy.Gen.u1DescType)
2332 {
2333 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2334 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2335 }
2336 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2337 {
2338 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2339 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2340 }
2341
2342 /* Don't allow lowering the privilege level. */
2343 /** @todo Does the lowering of privileges apply to software interrupts
2344 * only? This has bearings on the more-privileged or
2345 * same-privilege stack behavior further down. A testcase would
2346 * be nice. */
2347 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2348 {
2349 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2350 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2351 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2352 }
2353
2354 /* Make sure the selector is present. */
2355 if (!DescCS.Legacy.Gen.u1Present)
2356 {
2357 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2358 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2359 }
2360
2361 /* Check the new EIP against the new CS limit. */
2362 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2363 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2364 ? Idte.Gate.u16OffsetLow
2365 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2366 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2367 if (uNewEip > cbLimitCS)
2368 {
2369 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
2370 u8Vector, uNewEip, cbLimitCS, NewCS));
2371 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2372 }
2373
2374 /*
2375 * If the privilege level changes, we need to get a new stack from the TSS.
2376 * This in turns means validating the new SS and ESP...
2377 */
2378 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2379 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2380 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2381 if (uNewCpl != pIemCpu->uCpl)
2382 {
2383 RTSEL NewSS;
2384 uint32_t uNewEsp;
2385 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2386 if (rcStrict != VINF_SUCCESS)
2387 return rcStrict;
2388
2389 IEMSELDESC DescSS;
2390 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2391 if (rcStrict != VINF_SUCCESS)
2392 return rcStrict;
2393
2394 /* Check that there is sufficient space for the stack frame. */
2395 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2396 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2397 {
2398 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2399 }
2400
2401 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2402 if ( uNewEsp - 1 > cbLimitSS
2403 || uNewEsp < cbStackFrame)
2404 {
2405 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2406 u8Vector, NewSS, uNewEsp, cbStackFrame));
2407 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2408 }
2409
2410 /*
2411 * Start making changes.
2412 */
2413
2414 /* Create the stack frame. */
2415 RTPTRUNION uStackFrame;
2416 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2417 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420 void * const pvStackFrame = uStackFrame.pv;
2421
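        /* Inter-privilege 32-bit frame (from the top down): SS, ESP, EFLAGS,
           CS, EIP, with the error code (when present) at the lowest address. */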
2422 if (fFlags & IEM_XCPT_FLAGS_ERR)
2423 *uStackFrame.pu32++ = uErr;
2424 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
2425 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2426 uStackFrame.pu32[2] = fEfl;
2427 uStackFrame.pu32[3] = pCtx->esp;
2428 uStackFrame.pu32[4] = pCtx->ss.Sel;
2429 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2430 if (rcStrict != VINF_SUCCESS)
2431 return rcStrict;
2432
2433 /* Mark the selectors 'accessed' (hope this is the correct time). */
2434 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2435 * after pushing the stack frame? (Write protect the gdt + stack to
2436 * find out.) */
2437 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2438 {
2439 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2440 if (rcStrict != VINF_SUCCESS)
2441 return rcStrict;
2442 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2443 }
2444
2445 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2446 {
2447 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2448 if (rcStrict != VINF_SUCCESS)
2449 return rcStrict;
2450 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2451 }
2452
2453 /*
2454 * Start committing the register changes (joins with the DPL=CPL branch).
2455 */
2456 pCtx->ss.Sel = NewSS;
2457 pCtx->ss.ValidSel = NewSS;
2458 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2459 pCtx->ss.u32Limit = cbLimitSS;
2460 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2461 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2462 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2463 pIemCpu->uCpl = uNewCpl;
2464 }
2465 /*
2466 * Same privilege, no stack change and smaller stack frame.
2467 */
2468 else
2469 {
2470 uint64_t uNewRsp;
2471 RTPTRUNION uStackFrame;
2472 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2473 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2474 if (rcStrict != VINF_SUCCESS)
2475 return rcStrict;
2476 void * const pvStackFrame = uStackFrame.pv;
2477
2478 if (fFlags & IEM_XCPT_FLAGS_ERR)
2479 *uStackFrame.pu32++ = uErr;
2480 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2481 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2482 uStackFrame.pu32[2] = fEfl;
2483 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2484 if (rcStrict != VINF_SUCCESS)
2485 return rcStrict;
2486
2487 /* Mark the CS selector as 'accessed'. */
2488 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2489 {
2490 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2491 if (rcStrict != VINF_SUCCESS)
2492 return rcStrict;
2493 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2494 }
2495
2496 /*
2497 * Start committing the register changes (joins with the other branch).
2498 */
2499 pCtx->rsp = uNewRsp;
2500 }
2501
2502 /* ... register committing continues. */
2503 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2504 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2505 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2506 pCtx->cs.u32Limit = cbLimitCS;
2507 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2508 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2509
2510 pCtx->rip = uNewEip;
2511 fEfl &= ~fEflToClear;
2512 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2513
2514 if (fFlags & IEM_XCPT_FLAGS_CR2)
2515 pCtx->cr2 = uCr2;
2516
2517 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2518 iemRaiseXcptAdjustState(pCtx, u8Vector);
2519
2520 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2521}
2522
2523
2524/**
2525 * Implements exceptions and interrupts for V8086 mode.
2526 *
2527 * @returns VBox strict status code.
2528 * @param pIemCpu The IEM per CPU instance data.
2529 * @param pCtx The CPU context.
2530 * @param cbInstr The number of bytes to offset rIP by in the return
2531 * address.
2532 * @param u8Vector The interrupt / exception vector number.
2533 * @param fFlags The flags.
2534 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2535 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2536 */
2537static VBOXSTRICTRC
2538iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2539 PCPUMCTX pCtx,
2540 uint8_t cbInstr,
2541 uint8_t u8Vector,
2542 uint32_t fFlags,
2543 uint16_t uErr,
2544 uint64_t uCr2)
2545{
2546 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2547 /** @todo implement me. */
2548 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2549}
2550
2551
2552/**
2553 * Implements exceptions and interrupts for long mode.
2554 *
2555 * @returns VBox strict status code.
2556 * @param pIemCpu The IEM per CPU instance data.
2557 * @param pCtx The CPU context.
2558 * @param cbInstr The number of bytes to offset rIP by in the return
2559 * address.
2560 * @param u8Vector The interrupt / exception vector number.
2561 * @param fFlags The flags.
2562 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2563 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2564 */
2565static VBOXSTRICTRC
2566iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2567 PCPUMCTX pCtx,
2568 uint8_t cbInstr,
2569 uint8_t u8Vector,
2570 uint32_t fFlags,
2571 uint16_t uErr,
2572 uint64_t uCr2)
2573{
2574 NOREF(cbInstr);
2575
2576 /*
2577 * Read the IDT entry.
2578 */
2579 uint16_t offIdt = (uint16_t)u8Vector << 4;
2580 if (pCtx->idtr.cbIdt < offIdt + 7)
2581 {
2582 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2583 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2584 }
2585 X86DESC64 Idte;
2586 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
2587 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2588 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
2589 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2590 return rcStrict;
2591 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2592 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2593 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2594
2595 /*
2596 * Check the descriptor type, DPL and such.
2597 * ASSUMES this is done in the same order as described for call-gate calls.
2598 */
2599 if (Idte.Gate.u1DescType)
2600 {
2601 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2602 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2603 }
2604 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2605 switch (Idte.Gate.u4Type)
2606 {
2607 case AMD64_SEL_TYPE_SYS_INT_GATE:
2608 fEflToClear |= X86_EFL_IF;
2609 break;
2610 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2611 break;
2612
2613 default:
2614 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2615 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2616 }
2617
2618 /* Check DPL against CPL if applicable. */
2619 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2620 {
2621 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2622 {
2623 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2624 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2625 }
2626 }
2627
2628 /* Is it there? */
2629 if (!Idte.Gate.u1Present)
2630 {
2631 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2632 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2633 }
2634
2635 /* A null CS is bad. */
2636 RTSEL NewCS = Idte.Gate.u16Sel;
2637 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2638 {
2639 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2640 return iemRaiseGeneralProtectionFault0(pIemCpu);
2641 }
2642
2643 /* Fetch the descriptor for the new CS. */
2644 IEMSELDESC DescCS;
2645 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2646 if (rcStrict != VINF_SUCCESS)
2647 {
2648 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2649 return rcStrict;
2650 }
2651
2652 /* Must be a 64-bit code segment. */
2653 if (!DescCS.Long.Gen.u1DescType)
2654 {
2655 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2656 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2657 }
2658 if ( !DescCS.Long.Gen.u1Long
2659 || DescCS.Long.Gen.u1DefBig
2660 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2661 {
2662 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2663 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2664 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2665 }
2666
2667 /* Don't allow lowering the privilege level. For non-conforming CS
2668 selectors, the CS.DPL sets the privilege level the trap/interrupt
2669 handler runs at. For conforming CS selectors, the CPL remains
2670 unchanged, but the CS.DPL must be <= CPL. */
2671 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2672 * when CPU in Ring-0. Result \#GP? */
2673 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2674 {
2675 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2676 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2677 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2678 }
2679
2680
2681 /* Make sure the selector is present. */
2682 if (!DescCS.Legacy.Gen.u1Present)
2683 {
2684 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2685 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2686 }
2687
2688 /* Check that the new RIP is canonical. */
2689 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2690 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2691 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2692 if (!IEM_IS_CANONICAL(uNewRip))
2693 {
2694 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2695 return iemRaiseGeneralProtectionFault0(pIemCpu);
2696 }
2697
2698 /*
2699 * If the privilege level changes or if the IST isn't zero, we need to get
2700 * a new stack from the TSS.
2701 */
2702 uint64_t uNewRsp;
2703 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2704 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2705 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2706 if ( uNewCpl != pIemCpu->uCpl
2707 || Idte.Gate.u3IST != 0)
2708 {
2709 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2710 if (rcStrict != VINF_SUCCESS)
2711 return rcStrict;
2712 }
2713 else
2714 uNewRsp = pCtx->rsp;
2715 uNewRsp &= ~(uint64_t)0xf;
2716
2717 /*
2718 * Start making changes.
2719 */
2720
2721 /* Create the stack frame. */
2722 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
2723 RTPTRUNION uStackFrame;
2724 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2725 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2726 if (rcStrict != VINF_SUCCESS)
2727 return rcStrict;
2728 void * const pvStackFrame = uStackFrame.pv;
2729
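    /* Long-mode exception frame (from the top down): SS, RSP, RFLAGS, CS,
       RIP, with the error code (when present) at the lowest address. */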
2730 if (fFlags & IEM_XCPT_FLAGS_ERR)
2731 *uStackFrame.pu64++ = uErr;
2732 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
2733 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
2734 uStackFrame.pu64[2] = fEfl;
2735 uStackFrame.pu64[3] = pCtx->rsp;
2736 uStackFrame.pu64[4] = pCtx->ss.Sel;
2737 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2738 if (rcStrict != VINF_SUCCESS)
2739 return rcStrict;
2740
2741 /* Mark the CS selector 'accessed' (hope this is the correct time). */
2742 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2743 * after pushing the stack frame? (Write protect the gdt + stack to
2744 * find out.) */
2745 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2746 {
2747 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2748 if (rcStrict != VINF_SUCCESS)
2749 return rcStrict;
2750 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2751 }
2752
2753 /*
2754 * Start committing the register changes.
2755 */
2756 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
2757 * hidden registers when interrupting 32-bit or 16-bit code! */
2758 if (uNewCpl != pIemCpu->uCpl)
2759 {
2760 pCtx->ss.Sel = 0 | uNewCpl;
2761 pCtx->ss.ValidSel = 0 | uNewCpl;
2762 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2763 pCtx->ss.u32Limit = UINT32_MAX;
2764 pCtx->ss.u64Base = 0;
2765 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2766 }
2767 pCtx->rsp = uNewRsp - cbStackFrame;
2768 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2769 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2770 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2771 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2772 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2773 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2774 pCtx->rip = uNewRip;
2775 pIemCpu->uCpl = uNewCpl;
2776
2777 fEfl &= ~fEflToClear;
2778 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2779
2780 if (fFlags & IEM_XCPT_FLAGS_CR2)
2781 pCtx->cr2 = uCr2;
2782
2783 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2784 iemRaiseXcptAdjustState(pCtx, u8Vector);
2785
2786 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2787}
2788
2789
2790/**
2791 * Implements exceptions and interrupts.
2792 *
2793 * All exceptions and interrupts go through this function!
2794 *
2795 * @returns VBox strict status code.
2796 * @param pIemCpu The IEM per CPU instance data.
2797 * @param cbInstr The number of bytes to offset rIP by in the return
2798 * address.
2799 * @param u8Vector The interrupt / exception vector number.
2800 * @param fFlags The flags.
2801 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2802 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2803 */
2804DECL_NO_INLINE(static, VBOXSTRICTRC)
2805iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2806 uint8_t cbInstr,
2807 uint8_t u8Vector,
2808 uint32_t fFlags,
2809 uint16_t uErr,
2810 uint64_t uCr2)
2811{
2812 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2813
2814 /*
2815 * Do recursion accounting.
2816 */
2817 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2818 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2819 if (pIemCpu->cXcptRecursions == 0)
2820 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2821 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2822 else
2823 {
2824 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2825 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2826
2827 /** @todo double and triple faults. */
2828 if (pIemCpu->cXcptRecursions >= 3)
2829 {
2830#ifdef DEBUG_bird
2831 AssertFailed();
2832#endif
2833 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2834 }
2835
2836 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2837 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2838 {
2839 ....
2840 } */
2841 }
2842 pIemCpu->cXcptRecursions++;
2843 pIemCpu->uCurXcpt = u8Vector;
2844 pIemCpu->fCurXcpt = fFlags;
2845
2846 /*
2847 * Extensive logging.
2848 */
2849#if defined(LOG_ENABLED) && defined(IN_RING3)
2850 if (LogIs3Enabled())
2851 {
2852 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2853 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2854 char szRegs[4096];
2855 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2856 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2857 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2858 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2859 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2860 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2861 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2862 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2863 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2864 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2865 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2866 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2867 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2868 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2869 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2870 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2871 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2872 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2873 " efer=%016VR{efer}\n"
2874 " pat=%016VR{pat}\n"
2875 " sf_mask=%016VR{sf_mask}\n"
2876 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2877 " lstar=%016VR{lstar}\n"
2878 " star=%016VR{star} cstar=%016VR{cstar}\n"
2879 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2880 );
2881
2882 char szInstr[256];
2883 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2884 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2885 szInstr, sizeof(szInstr), NULL);
2886 Log3(("%s%s\n", szRegs, szInstr));
2887 }
2888#endif /* LOG_ENABLED */
2889
2890 /*
2891 * Call the mode specific worker function.
2892 */
2893 VBOXSTRICTRC rcStrict;
2894 if (!(pCtx->cr0 & X86_CR0_PE))
2895 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2896 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2897 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2898 else if (!pCtx->eflags.Bits.u1VM)
2899 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2900 else
2901 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2902
2903 /*
2904 * Unwind.
2905 */
2906 pIemCpu->cXcptRecursions--;
2907 pIemCpu->uCurXcpt = uPrevXcpt;
2908 pIemCpu->fCurXcpt = fPrevXcpt;
2909 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2910 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2911 return rcStrict;
2912}
2913
2914
2915/** \#DE - 00. */
2916DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2917{
2918 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2919}
2920
2921
2922/** \#DB - 01. */
2923DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2924{
2925 /** @todo set/clear RF. */
2926 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2927}
2928
2929
2930/** \#UD - 06. */
2931DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2932{
2933 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2934}
2935
2936
2937/** \#NM - 07. */
2938DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2939{
2940 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2941}
2942
2943
2944#ifdef SOME_UNUSED_FUNCTION
2945/** \#TS(err) - 0a. */
2946DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2947{
2948 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2949}
2950#endif
2951
2952
2953/** \#TS(tr) - 0a. */
2954DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2955{
2956 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2957 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2958}
2959
2960
2961/** \#NP(err) - 0b. */
2962DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2963{
2964 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2965}
2966
2967
2968/** \#NP(seg) - 0b. */
2969DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2970{
2971 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2972 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2973}
2974
2975
2976/** \#NP(sel) - 0b. */
2977DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2978{
2979 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2980 uSel & ~X86_SEL_RPL, 0);
2981}
2982
2983
2984/** \#SS(seg) - 0c. */
2985DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2986{
2987 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2988 uSel & ~X86_SEL_RPL, 0);
2989}
2990
2991
2992/** \#GP(n) - 0d. */
2993DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2994{
2995 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2996}
2997
2998
2999/** \#GP(0) - 0d. */
3000DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
3001{
3002 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3003}
3004
3005
3006/** \#GP(sel) - 0d. */
3007DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
3008{
3009 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3010 Sel & ~X86_SEL_RPL, 0);
3011}
3012
3013
3014/** \#GP(0) - 0d. */
3015DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
3016{
3017 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3018}
3019
3020
3021/** \#GP(sel) - 0d. */
3022DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
3023{
3024 NOREF(iSegReg); NOREF(fAccess);
3025 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
3026 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3027}
3028
3029
3030/** \#GP(sel) - 0d. */
3031DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
3032{
3033 NOREF(Sel);
3034 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3035}
3036
3037
3038/** \#GP(sel) - 0d. */
3039DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
3040{
3041 NOREF(iSegReg); NOREF(fAccess);
3042 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3043}
3044
3045
3046/** \#PF(n) - 0e. */
3047DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
3048{
3049 uint16_t uErr;
3050 switch (rc)
3051 {
3052 case VERR_PAGE_NOT_PRESENT:
3053 case VERR_PAGE_TABLE_NOT_PRESENT:
3054 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
3055 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
3056 uErr = 0;
3057 break;
3058
3059 default:
3060 AssertMsgFailed(("%Rrc\n", rc));
3061 case VERR_ACCESS_DENIED:
3062 uErr = X86_TRAP_PF_P;
3063 break;
3064
3065 /** @todo reserved */
3066 }
3067
3068 if (pIemCpu->uCpl == 3)
3069 uErr |= X86_TRAP_PF_US;
3070
3071 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
3072 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
3073 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
3074 uErr |= X86_TRAP_PF_ID;
3075
3076 /* Note! RW access callers reporting a WRITE protection fault, will clear
3077 the READ flag before calling. So, read-modify-write accesses (RW)
3078 can safely be reported as READ faults. */
3079 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
3080 uErr |= X86_TRAP_PF_RW;
3081
3082 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
3083 uErr, GCPtrWhere);
3084}
3085
3086
3087/** \#MF(0) - 10. */
3088DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
3089{
3090 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3091}
3092
3093
3094/** \#AC(0) - 11. */
3095DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
3096{
3097 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3098}
3099
3100
3101/**
3102 * Macro for calling iemCImplRaiseDivideError().
3103 *
3104 * This enables us to add/remove arguments and force different levels of
3105 * inlining as we wish.
3106 *
3107 * @return Strict VBox status code.
3108 */
3109#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
3110IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
3111{
3112 NOREF(cbInstr);
3113 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3114}
3115
3116
3117/**
3118 * Macro for calling iemCImplRaiseInvalidLockPrefix().
3119 *
3120 * This enables us to add/remove arguments and force different levels of
3121 * inlining as we wish.
3122 *
3123 * @return Strict VBox status code.
3124 */
3125#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
3126IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3127{
3128 NOREF(cbInstr);
3129 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3130}
3131
3132
3133/**
3134 * Macro for calling iemCImplRaiseInvalidOpcode().
3135 *
3136 * This enables us to add/remove arguments and force different levels of
3137 * inlining as we wish.
3138 *
3139 * @return Strict VBox status code.
3140 */
3141#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
3142IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3143{
3144 NOREF(cbInstr);
3145 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3146}
3147
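/*
 * Usage sketch (illustrative): an opcode decoder that hits an undefined
 * encoding simply returns the corresponding macro, deferring the actual
 * exception raising to the C implementation above. The condition below is
 * hypothetical.
 */
#if 0 /* example */
    if (!fEncodingValid)
        return IEMOP_RAISE_INVALID_OPCODE();
#endif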
3148
3149/** @} */
3150
3151
3152/*
3153 *
3154 * Helper routines.
3155 * Helper routines.
3156 * Helper routines.
3157 *
3158 */
3159
3160/**
3161 * Recalculates the effective operand size.
3162 *
3163 * @param pIemCpu The IEM state.
3164 */
3165static void iemRecalEffOpSize(PIEMCPU pIemCpu)
3166{
3167 switch (pIemCpu->enmCpuMode)
3168 {
3169 case IEMMODE_16BIT:
3170 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
3171 break;
3172 case IEMMODE_32BIT:
3173 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
3174 break;
3175 case IEMMODE_64BIT:
3176 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
3177 {
3178 case 0:
3179 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
3180 break;
3181 case IEM_OP_PRF_SIZE_OP:
3182 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3183 break;
3184 case IEM_OP_PRF_SIZE_REX_W:
3185 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
3186 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3187 break;
3188 }
3189 break;
3190 default:
3191 AssertFailed();
3192 }
3193}
3194
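/*
 * Worked example (64-bit mode, per the switch above): REX.W wins over 66h,
 * so REX.W + 66h still yields a 64-bit effective operand size; 66h alone
 * drops it to 16-bit, and no size prefix leaves the default operand size.
 */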
3195
3196/**
3197 * Sets the default operand size to 64-bit and recalculates the effective
3198 * operand size.
3199 *
3200 * @param pIemCpu The IEM state.
3201 */
3202static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
3203{
3204 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3205 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
3206 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
3207 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3208 else
3209 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3210}
3211
3212
3213/*
3214 *
3215 * Common opcode decoders.
3216 * Common opcode decoders.
3217 * Common opcode decoders.
3218 *
3219 */
3220//#include <iprt/mem.h>
3221
3222/**
3223 * Used to add extra details about a stub case.
3224 * @param pIemCpu The IEM per CPU state.
3225 */
3226static void iemOpStubMsg2(PIEMCPU pIemCpu)
3227{
3228#if defined(LOG_ENABLED) && defined(IN_RING3)
3229 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3230 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3231 char szRegs[4096];
3232 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3233 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3234 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3235 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3236 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3237 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3238 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3239 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3240 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3241 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3242 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3243 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3244 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3245 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3246 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3247 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3248 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3249 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3250 " efer=%016VR{efer}\n"
3251 " pat=%016VR{pat}\n"
3252 " sf_mask=%016VR{sf_mask}\n"
3253 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3254 " lstar=%016VR{lstar}\n"
3255 " star=%016VR{star} cstar=%016VR{cstar}\n"
3256 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3257 );
3258
3259 char szInstr[256];
3260 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3261 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3262 szInstr, sizeof(szInstr), NULL);
3263
3264 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
3265#else
3266 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
3267#endif
3268}
3269
3270/**
3271 * Complains about a stub.
3272 *
3273 * Providing two versions of this macro, one for daily use and one for use when
3274 * working on IEM.
3275 */
3276#if 0
3277# define IEMOP_BITCH_ABOUT_STUB() \
3278 do { \
3279 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
3280 iemOpStubMsg2(pIemCpu); \
3281 RTAssertPanic(); \
3282 } while (0)
3283#else
3284# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
3285#endif
3286
3287/** Stubs an opcode. */
3288#define FNIEMOP_STUB(a_Name) \
3289 FNIEMOP_DEF(a_Name) \
3290 { \
3291 IEMOP_BITCH_ABOUT_STUB(); \
3292 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3293 } \
3294 typedef int ignore_semicolon
3295
3296/** Stubs an opcode. */
3297#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
3298 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3299 { \
3300 IEMOP_BITCH_ABOUT_STUB(); \
3301 NOREF(a_Name0); \
3302 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3303 } \
3304 typedef int ignore_semicolon
3305
3306/** Stubs an opcode which currently should raise \#UD. */
3307#define FNIEMOP_UD_STUB(a_Name) \
3308 FNIEMOP_DEF(a_Name) \
3309 { \
3310 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3311 return IEMOP_RAISE_INVALID_OPCODE(); \
3312 } \
3313 typedef int ignore_semicolon
3314
3315/** Stubs an opcode which currently should raise \#UD. */
3316#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
3317 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3318 { \
3319 NOREF(a_Name0); \
3320 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3321 return IEMOP_RAISE_INVALID_OPCODE(); \
3322 } \
3323 typedef int ignore_semicolon
3324
3325
3326
3327/** @name Register Access.
3328 * @{
3329 */
3330
3331/**
3332 * Gets a reference (pointer) to the specified hidden segment register.
3333 *
3334 * @returns Hidden register reference.
3335 * @param pIemCpu The per CPU data.
3336 * @param iSegReg The segment register.
3337 */
3338static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
3339{
3340 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3341 PCPUMSELREG pSReg;
3342 switch (iSegReg)
3343 {
3344 case X86_SREG_ES: pSReg = &pCtx->es; break;
3345 case X86_SREG_CS: pSReg = &pCtx->cs; break;
3346 case X86_SREG_SS: pSReg = &pCtx->ss; break;
3347 case X86_SREG_DS: pSReg = &pCtx->ds; break;
3348 case X86_SREG_FS: pSReg = &pCtx->fs; break;
3349 case X86_SREG_GS: pSReg = &pCtx->gs; break;
3350 default:
3351 AssertFailedReturn(NULL);
3352 }
3353#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3354 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
3355 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
3356#else
3357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
3358#endif
3359 return pSReg;
3360}
3361
3362
3363/**
3364 * Gets a reference (pointer) to the specified segment register (the selector
3365 * value).
3366 *
3367 * @returns Pointer to the selector variable.
3368 * @param pIemCpu The per CPU data.
3369 * @param iSegReg The segment register.
3370 */
3371static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
3372{
3373 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3374 switch (iSegReg)
3375 {
3376 case X86_SREG_ES: return &pCtx->es.Sel;
3377 case X86_SREG_CS: return &pCtx->cs.Sel;
3378 case X86_SREG_SS: return &pCtx->ss.Sel;
3379 case X86_SREG_DS: return &pCtx->ds.Sel;
3380 case X86_SREG_FS: return &pCtx->fs.Sel;
3381 case X86_SREG_GS: return &pCtx->gs.Sel;
3382 }
3383 AssertFailedReturn(NULL);
3384}
3385
3386
3387/**
3388 * Fetches the selector value of a segment register.
3389 *
3390 * @returns The selector value.
3391 * @param pIemCpu The per CPU data.
3392 * @param iSegReg The segment register.
3393 */
3394static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3395{
3396 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3397 switch (iSegReg)
3398 {
3399 case X86_SREG_ES: return pCtx->es.Sel;
3400 case X86_SREG_CS: return pCtx->cs.Sel;
3401 case X86_SREG_SS: return pCtx->ss.Sel;
3402 case X86_SREG_DS: return pCtx->ds.Sel;
3403 case X86_SREG_FS: return pCtx->fs.Sel;
3404 case X86_SREG_GS: return pCtx->gs.Sel;
3405 }
3406 AssertFailedReturn(0xffff);
3407}
3408
3409
3410/**
3411 * Gets a reference (pointer) to the specified general register.
3412 *
3413 * @returns Register reference.
3414 * @param pIemCpu The per CPU data.
3415 * @param iReg The general register.
3416 */
3417static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3418{
3419 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3420 switch (iReg)
3421 {
3422 case X86_GREG_xAX: return &pCtx->rax;
3423 case X86_GREG_xCX: return &pCtx->rcx;
3424 case X86_GREG_xDX: return &pCtx->rdx;
3425 case X86_GREG_xBX: return &pCtx->rbx;
3426 case X86_GREG_xSP: return &pCtx->rsp;
3427 case X86_GREG_xBP: return &pCtx->rbp;
3428 case X86_GREG_xSI: return &pCtx->rsi;
3429 case X86_GREG_xDI: return &pCtx->rdi;
3430 case X86_GREG_x8: return &pCtx->r8;
3431 case X86_GREG_x9: return &pCtx->r9;
3432 case X86_GREG_x10: return &pCtx->r10;
3433 case X86_GREG_x11: return &pCtx->r11;
3434 case X86_GREG_x12: return &pCtx->r12;
3435 case X86_GREG_x13: return &pCtx->r13;
3436 case X86_GREG_x14: return &pCtx->r14;
3437 case X86_GREG_x15: return &pCtx->r15;
3438 }
3439 AssertFailedReturn(NULL);
3440}
3441
3442
3443/**
3444 * Gets a reference (pointer) to the specified 8-bit general register.
3445 *
3446 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3447 *
3448 * @returns Register reference.
3449 * @param pIemCpu The per CPU data.
3450 * @param iReg The register.
3451 */
3452static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3453{
3454 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3455 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3456
3457 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3458 if (iReg >= 4)
3459 pu8Reg++;
3460 return pu8Reg;
3461}
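
/* Illustrative example: without a REX prefix, encodings 4-7 select the
   legacy high byte registers, so iemGRegRefU8(pIemCpu, 4) yields a pointer
   to AH (stored right after AL in pCtx->rax), whereas with any REX prefix
   present the same encoding refers to SPL, i.e. the low byte of RSP. */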
3462
3463
3464/**
3465 * Fetches the value of an 8-bit general register.
3466 *
3467 * @returns The register value.
3468 * @param pIemCpu The per CPU data.
3469 * @param iReg The register.
3470 */
3471static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3472{
3473 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3474 return *pbSrc;
3475}
3476
3477
3478/**
3479 * Fetches the value of a 16-bit general register.
3480 *
3481 * @returns The register value.
3482 * @param pIemCpu The per CPU data.
3483 * @param iReg The register.
3484 */
3485static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3486{
3487 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3488}
3489
3490
3491/**
3492 * Fetches the value of a 32-bit general register.
3493 *
3494 * @returns The register value.
3495 * @param pIemCpu The per CPU data.
3496 * @param iReg The register.
3497 */
3498static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3499{
3500 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3501}
3502
3503
3504/**
3505 * Fetches the value of a 64-bit general register.
3506 *
3507 * @returns The register value.
3508 * @param pIemCpu The per CPU data.
3509 * @param iReg The register.
3510 */
3511static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3512{
3513 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3514}
3515
3516
3517/**
3518 * Is the FPU state in FXSAVE format or not.
3519 *
3520 * @returns true if it is, false if it's in FNSAVE.
3521 * @param pIemCpu The IEM per CPU data.
3522 */
3523DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3524{
3525#ifdef RT_ARCH_AMD64
3526 NOREF(pIemCpu);
3527 return true;
3528#else
3529 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3530 return true;
3531#endif
3532}
3533
3534
3535/**
3536 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3537 *
3538 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3539 * segment limit.
3540 *
3541 * @param pIemCpu The per CPU data.
3542 * @param offNextInstr The offset of the next instruction.
3543 */
3544static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3545{
3546 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3547 switch (pIemCpu->enmEffOpSize)
3548 {
3549 case IEMMODE_16BIT:
3550 {
3551 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3552 if ( uNewIp > pCtx->cs.u32Limit
3553 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3554 return iemRaiseGeneralProtectionFault0(pIemCpu);
3555 pCtx->rip = uNewIp;
3556 break;
3557 }
3558
3559 case IEMMODE_32BIT:
3560 {
3561 Assert(pCtx->rip <= UINT32_MAX);
3562 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3563
3564 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3565 if (uNewEip > pCtx->cs.u32Limit)
3566 return iemRaiseGeneralProtectionFault0(pIemCpu);
3567 pCtx->rip = uNewEip;
3568 break;
3569 }
3570
3571 case IEMMODE_64BIT:
3572 {
3573 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3574
3575 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3576 if (!IEM_IS_CANONICAL(uNewRip))
3577 return iemRaiseGeneralProtectionFault0(pIemCpu);
3578 pCtx->rip = uNewRip;
3579 break;
3580 }
3581
3582 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3583 }
3584
3585 return VINF_SUCCESS;
3586}
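
/* Worked example (16-bit code): for the two byte instruction "EB 05"
   (JMP short +5) at IP=0x0100, offOpcode is 2 and offNextInstr is 5, so the
   new IP becomes 0x0100 + 5 + 2 = 0x0107, provided that lies within the CS
   limit. */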
3587
3588
3589/**
3590 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3591 *
3592 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3593 * segment limit.
3594 *
3595 * @returns Strict VBox status code.
3596 * @param pIemCpu The per CPU data.
3597 * @param offNextInstr The offset of the next instruction.
3598 */
3599static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3600{
3601 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3602 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3603
3604 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3605 if ( uNewIp > pCtx->cs.u32Limit
3606 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3607 return iemRaiseGeneralProtectionFault0(pIemCpu);
3608 /** @todo Test 16-bit jump in 64-bit mode. */
3609 pCtx->rip = uNewIp;
3610
3611 return VINF_SUCCESS;
3612}
3613
3614
3615/**
3616 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3617 *
3618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3619 * segment limit.
3620 *
3621 * @returns Strict VBox status code.
3622 * @param pIemCpu The per CPU data.
3623 * @param offNextInstr The offset of the next instruction.
3624 */
3625static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3626{
3627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3628 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3629
3630 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3631 {
3632 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3633
3634 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3635 if (uNewEip > pCtx->cs.u32Limit)
3636 return iemRaiseGeneralProtectionFault0(pIemCpu);
3637 pCtx->rip = uNewEip;
3638 }
3639 else
3640 {
3641 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3642
3643 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3644 if (!IEM_IS_CANONICAL(uNewRip))
3645 return iemRaiseGeneralProtectionFault0(pIemCpu);
3646 pCtx->rip = uNewRip;
3647 }
3648 return VINF_SUCCESS;
3649}
3650
3651
3652/**
3653 * Performs a near jump to the specified address.
3654 *
3655 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3656 * segment limit.
3657 *
3658 * @param pIemCpu The per CPU data.
3659 * @param uNewRip The new RIP value.
3660 */
3661static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3662{
3663 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3664 switch (pIemCpu->enmEffOpSize)
3665 {
3666 case IEMMODE_16BIT:
3667 {
3668 Assert(uNewRip <= UINT16_MAX);
3669 if ( uNewRip > pCtx->cs.u32Limit
3670 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3671 return iemRaiseGeneralProtectionFault0(pIemCpu);
3672 /** @todo Test 16-bit jump in 64-bit mode. */
3673 pCtx->rip = uNewRip;
3674 break;
3675 }
3676
3677 case IEMMODE_32BIT:
3678 {
3679 Assert(uNewRip <= UINT32_MAX);
3680 Assert(pCtx->rip <= UINT32_MAX);
3681 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3682
3683 if (uNewRip > pCtx->cs.u32Limit)
3684 return iemRaiseGeneralProtectionFault0(pIemCpu);
3685 pCtx->rip = uNewRip;
3686 break;
3687 }
3688
3689 case IEMMODE_64BIT:
3690 {
3691 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3692
3693 if (!IEM_IS_CANONICAL(uNewRip))
3694 return iemRaiseGeneralProtectionFault0(pIemCpu);
3695 pCtx->rip = uNewRip;
3696 break;
3697 }
3698
3699 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3700 }
3701
3702 return VINF_SUCCESS;
3703}
3704
3705
3706/**
3707 * Get the address of the top of the stack.
3708 *
3709 * @param pIemCpu The per CPU data.
3710 * @param pCtx The CPU context which SP/ESP/RSP should be
3711 * read.
3712 */
3713DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
3714{
3715 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3716 return pCtx->rsp;
3717 if (pCtx->ss.Attr.n.u1DefBig)
3718 return pCtx->esp;
3719 return pCtx->sp;
3720}
3721
3722
3723/**
3724 * Updates the RIP/EIP/IP to point to the next instruction.
3725 *
3726 * @param pIemCpu The per CPU data.
3727 * @param cbInstr The number of bytes to add.
3728 */
3729static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3730{
3731 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3732 switch (pIemCpu->enmCpuMode)
3733 {
3734 case IEMMODE_16BIT:
3735 Assert(pCtx->rip <= UINT16_MAX);
3736 pCtx->eip += cbInstr;
3737 pCtx->eip &= UINT32_C(0xffff);
3738 break;
3739
3740 case IEMMODE_32BIT:
3741 pCtx->eip += cbInstr;
3742 Assert(pCtx->rip <= UINT32_MAX);
3743 break;
3744
3745 case IEMMODE_64BIT:
3746 pCtx->rip += cbInstr;
3747 break;
3748 default: AssertFailed();
3749 }
3750}
3751
3752
3753/**
3754 * Updates the RIP/EIP/IP to point to the next instruction.
3755 *
3756 * @param pIemCpu The per CPU data.
3757 */
3758static void iemRegUpdateRip(PIEMCPU pIemCpu)
3759{
3760 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3761}
3762
3763
3764/**
3765 * Adds to the stack pointer.
3766 *
3767 * @param pIemCpu The per CPU data.
3768 * @param pCtx The CPU context which SP/ESP/RSP should be
3769 * updated.
3770 * @param cbToAdd The number of bytes to add.
3771 */
3772DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
3773{
3774 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3775 pCtx->rsp += cbToAdd;
3776 else if (pCtx->ss.Attr.n.u1DefBig)
3777 pCtx->esp += cbToAdd;
3778 else
3779 pCtx->sp += cbToAdd;
3780}
3781
3782
3783/**
3784 * Subtracts from the stack pointer.
3785 *
3786 * @param pIemCpu The per CPU data.
3787 * @param pCtx The CPU context which SP/ESP/RSP should be
3788 * updated.
3789 * @param cbToSub The number of bytes to subtract.
3790 */
3791DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
3792{
3793 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3794 pCtx->rsp -= cbToSub;
3795 else if (pCtx->ss.Attr.n.u1DefBig)
3796 pCtx->esp -= cbToSub;
3797 else
3798 pCtx->sp -= cbToSub;
3799}
3800
3801
3802/**
3803 * Adds to the temporary stack pointer.
3804 *
3805 * @param pIemCpu The per CPU data.
3806 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3807 * @param cbToAdd The number of bytes to add.
3808 * @param pCtx Where to get the current stack mode.
3809 */
3810DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
3811{
3812 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3813 pTmpRsp->u += cbToAdd;
3814 else if (pCtx->ss.Attr.n.u1DefBig)
3815 pTmpRsp->DWords.dw0 += cbToAdd;
3816 else
3817 pTmpRsp->Words.w0 += cbToAdd;
3818}
3819
3820
3821/**
3822 * Subtracts from the temporary stack pointer.
3823 *
3824 * @param pIemCpu The per CPU data.
3825 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3826 * @param cbToSub The number of bytes to subtract.
3827 * @param pCtx Where to get the current stack mode.
3828 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3829 * expecting that.
3830 */
3831DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
3832{
3833 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3834 pTmpRsp->u -= cbToSub;
3835 else if (pCtx->ss.Attr.n.u1DefBig)
3836 pTmpRsp->DWords.dw0 -= cbToSub;
3837 else
3838 pTmpRsp->Words.w0 -= cbToSub;
3839}
3840
3841
3842/**
3843 * Calculates the effective stack address for a push of the specified size as
3844 * well as the new RSP value (upper bits may be masked).
3845 *
3846 * @returns Effective stack address for the push.
3847 * @param pIemCpu The IEM per CPU data.
3848 * @param pCtx Where to get the current stack mode.
3849 * @param cbItem The size of the stack item to push.
3850 * @param puNewRsp Where to return the new RSP value.
3851 */
3852DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3853{
3854 RTUINT64U uTmpRsp;
3855 RTGCPTR GCPtrTop;
3856 uTmpRsp.u = pCtx->rsp;
3857
3858 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3859 GCPtrTop = uTmpRsp.u -= cbItem;
3860 else if (pCtx->ss.Attr.n.u1DefBig)
3861 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3862 else
3863 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3864 *puNewRsp = uTmpRsp.u;
3865 return GCPtrTop;
3866}
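
/* Example: pushing a 4 byte item with a 32-bit stack (SS.B=1) and
   ESP=0x00001000 returns GCPtrTop=0x00000ffc and a *puNewRsp whose low
   dword is 0x00000ffc; only the part of RSP selected by the stack mode is
   decremented. */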
3867
3868
3869/**
3870 * Gets the current stack pointer and calculates the value after a pop of the
3871 * specified size.
3872 *
3873 * @returns Current stack pointer.
3874 * @param pIemCpu The per CPU data.
3875 * @param pCtx Where to get the current stack mode.
3876 * @param cbItem The size of the stack item to pop.
3877 * @param puNewRsp Where to return the new RSP value.
3878 */
3879DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3880{
3881 RTUINT64U uTmpRsp;
3882 RTGCPTR GCPtrTop;
3883 uTmpRsp.u = pCtx->rsp;
3884
3885 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3886 {
3887 GCPtrTop = uTmpRsp.u;
3888 uTmpRsp.u += cbItem;
3889 }
3890 else if (pCtx->ss.Attr.n.u1DefBig)
3891 {
3892 GCPtrTop = uTmpRsp.DWords.dw0;
3893 uTmpRsp.DWords.dw0 += cbItem;
3894 }
3895 else
3896 {
3897 GCPtrTop = uTmpRsp.Words.w0;
3898 uTmpRsp.Words.w0 += cbItem;
3899 }
3900 *puNewRsp = uTmpRsp.u;
3901 return GCPtrTop;
3902}
3903
3904
3905/**
3906 * Calculates the effective stack address for a push of the specified size as
3907 * well as the new temporary RSP value (upper bits may be masked).
3908 *
3909 * @returns Effective stack address for the push.
3910 * @param pIemCpu The per CPU data.
3911 * @param pCtx Where to get the current stack mode.
3912 * @param pTmpRsp The temporary stack pointer. This is updated.
3913 * @param cbItem The size of the stack item to push.
3914 */
3915DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
3916{
3917 RTGCPTR GCPtrTop;
3918
3919 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3920 GCPtrTop = pTmpRsp->u -= cbItem;
3921 else if (pCtx->ss.Attr.n.u1DefBig)
3922 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3923 else
3924 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3925 return GCPtrTop;
3926}
3927
3928
3929/**
3930 * Gets the effective stack address for a pop of the specified size and
3931 * calculates and updates the temporary RSP.
3932 *
3933 * @returns Current stack pointer.
3934 * @param pIemCpu The per CPU data.
3935 * @param pTmpRsp The temporary stack pointer. This is updated.
3936 * @param pCtx Where to get the current stack mode.
3937 * @param cbItem The size of the stack item to pop.
3938 */
3939DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
3940{
3941 RTGCPTR GCPtrTop;
3942 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3943 {
3944 GCPtrTop = pTmpRsp->u;
3945 pTmpRsp->u += cbItem;
3946 }
3947 else if (pCtx->ss.Attr.n.u1DefBig)
3948 {
3949 GCPtrTop = pTmpRsp->DWords.dw0;
3950 pTmpRsp->DWords.dw0 += cbItem;
3951 }
3952 else
3953 {
3954 GCPtrTop = pTmpRsp->Words.w0;
3955 pTmpRsp->Words.w0 += cbItem;
3956 }
3957 return GCPtrTop;
3958}
3959
3960
3961/**
3962 * Checks if an Intel CPUID feature bit is set.
3963 *
3964 * @returns true / false.
3965 *
3966 * @param pIemCpu The IEM per CPU data.
3967 * @param fEdx The EDX bit to test, or 0 if ECX.
3968 * @param fEcx The ECX bit to test, or 0 if EDX.
3969 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3970 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3971 */
3972static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3973{
3974 uint32_t uEax, uEbx, uEcx, uEdx;
3975 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3976 return (fEcx && (uEcx & fEcx))
3977 || (fEdx && (uEdx & fEdx));
3978}
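
/* Illustrative usage, assuming the usual CPUID bit define from x86.h:
   iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_FXSR, 0)
   tests an EDX feature (pass 0 for the ECX argument), and vice versa for
   ECX features. */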
3979
3980
3981/**
3982 * Checks if an AMD CPUID feature bit is set.
3983 *
3984 * @returns true / false.
3985 *
3986 * @param pIemCpu The IEM per CPU data.
3987 * @param fEdx The EDX bit to test, or 0 if ECX.
3988 * @param fEcx The ECX bit to test, or 0 if EDX.
3989 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3990 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3991 */
3992static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3993{
3994 uint32_t uEax, uEbx, uEcx, uEdx;
3995 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3996 return (fEcx && (uEcx & fEcx))
3997 || (fEdx && (uEdx & fEdx));
3998}
3999
4000/** @} */
4001
4002
4003/** @name FPU access and helpers.
4004 *
4005 * @{
4006 */
4007
4008
4009/**
4010 * Hook for preparing to use the host FPU.
4011 *
4012 * This is necessary in ring-0 and raw-mode context.
4013 *
4014 * @param pIemCpu The IEM per CPU data.
4015 */
4016DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
4017{
4018#ifdef IN_RING3
4019 NOREF(pIemCpu);
4020#else
4021/** @todo RZ: FIXME */
4022//# error "Implement me"
4023#endif
4024}
4025
4026
4027/**
4028 * Hook for preparing to use the host FPU for SSE.
4029 *
4030 * This is necessary in ring-0 and raw-mode context.
4031 *
4032 * @param pIemCpu The IEM per CPU data.
4033 */
4034DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
4035{
4036 iemFpuPrepareUsage(pIemCpu);
4037}
4038
4039
4040/**
4041 * Stores a QNaN value into a FPU register.
4042 *
4043 * @param pReg Pointer to the register.
4044 */
4045DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
4046{
4047 pReg->au32[0] = UINT32_C(0x00000000);
4048 pReg->au32[1] = UINT32_C(0xc0000000);
4049 pReg->au16[4] = UINT16_C(0xffff);
4050}
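
/* Note: the pattern stored above is the x87 "real indefinite" QNaN, i.e.
   sign=1, exponent=0x7fff and mantissa=0xc000000000000000, which is the
   value the FPU itself produces for masked invalid operation responses. */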
4051
4052
4053/**
4054 * Updates the FOP, FPU.CS and FPUIP registers.
4055 *
4056 * @param pIemCpu The IEM per CPU data.
4057 * @param pCtx The CPU context.
4058 */
4059DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4060{
4061 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
4062 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
4063 /** @todo FPU.CS and FPUIP need to be kept separately. */
4064 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4065 {
4066 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
4067 * happens in real mode here based on the fnsave and fnstenv images. */
4068 pCtx->fpu.CS = 0;
4069 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
4070 }
4071 else
4072 {
4073 pCtx->fpu.CS = pCtx->cs.Sel;
4074 pCtx->fpu.FPUIP = pCtx->rip;
4075 }
4076}
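
/* Informal example: for "D8 C1" (FADD ST(0),ST(1)) the FOP stored above is
   ((0xd8 & 0x7) << 8) | 0xc1 = 0x0c1, i.e. the low three bits of the escape
   byte combined with the ModR/M byte, matching the 11-bit FOP format. */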
4077
4078
4079/**
4080 * Updates the FPU.DS and FPUDP registers.
4081 *
4082 * @param pIemCpu The IEM per CPU data.
4083 * @param pCtx The CPU context.
4084 * @param iEffSeg The effective segment register.
4085 * @param GCPtrEff The effective address relative to @a iEffSeg.
4086 */
4087DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4088{
4089 RTSEL sel;
4090 switch (iEffSeg)
4091 {
4092 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
4093 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
4094 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
4095 case X86_SREG_ES: sel = pCtx->es.Sel; break;
4096 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
4097 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
4098 default:
4099 AssertMsgFailed(("%d\n", iEffSeg));
4100 sel = pCtx->ds.Sel;
4101 }
4102 /** @todo FPU.DS and FPUDP need to be kept separately. */
4103 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4104 {
4105 pCtx->fpu.DS = 0;
4106 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
4107 }
4108 else
4109 {
4110 pCtx->fpu.DS = sel;
4111 pCtx->fpu.FPUDP = GCPtrEff;
4112 }
4113}
4114
4115
4116/**
4117 * Rotates the stack registers in the push direction.
4118 *
4119 * @param pCtx The CPU context.
4120 * @remarks This is a complete waste of time, but fxsave stores the registers in
4121 * stack order.
4122 */
4123DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
4124{
4125 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
4126 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
4127 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
4128 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
4129 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
4130 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
4131 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
4132 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
4133 pCtx->fpu.aRegs[0].r80 = r80Tmp;
4134}
4135
4136
4137/**
4138 * Rotates the stack registers in the pop direction.
4139 *
4140 * @param pCtx The CPU context.
4141 * @remarks This is a complete waste of time, but fxsave stores the registers in
4142 * stack order.
4143 */
4144DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
4145{
4146 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
4147 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
4148 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
4149 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
4150 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
4151 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
4152 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
4153 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
4154 pCtx->fpu.aRegs[7].r80 = r80Tmp;
4155}
4156
4157
4158/**
4159 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4160 * exception prevents it.
4161 *
4162 * @param pIemCpu The IEM per CPU data.
4163 * @param pResult The FPU operation result to push.
4164 * @param pCtx The CPU context.
4165 */
4166static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
4167{
4168 /* Update FSW and bail if there are pending exceptions afterwards. */
4169 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4170 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4171 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4172 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4173 {
4174 pCtx->fpu.FSW = fFsw;
4175 return;
4176 }
4177
4178 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4179 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4180 {
4181 /* All is fine, push the actual value. */
4182 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4183 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
4184 }
4185 else if (pCtx->fpu.FCW & X86_FCW_IM)
4186 {
4187 /* Masked stack overflow, push QNaN. */
4188 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4189 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4190 }
4191 else
4192 {
4193 /* Raise stack overflow, don't push anything. */
4194 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4195 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4196 return;
4197 }
4198
4199 fFsw &= ~X86_FSW_TOP_MASK;
4200 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4201 pCtx->fpu.FSW = fFsw;
4202
4203 iemFpuRotateStackPush(pCtx);
4204}
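
/* Note: a push decrements TOP modulo 8, which the code above does by adding
   7 and masking; e.g. with TOP=0 the new top is register 7. If the FTW bit
   for that register is already set, a stack overflow is signalled (IE, SF
   and C1), storing a QNaN if the exception is masked and nothing otherwise. */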
4205
4206
4207/**
4208 * Stores a result in a FPU register and updates the FSW and FTW.
4209 *
4210 * @param pIemCpu The IEM per CPU data.
4211 * @param pResult The result to store.
4212 * @param iStReg Which FPU register to store it in.
4213 * @param pCtx The CPU context.
4214 */
4215static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
4216{
4217 Assert(iStReg < 8);
4218 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4219 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4220 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4221 pCtx->fpu.FTW |= RT_BIT(iReg);
4222 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
4223}
4224
4225
4226/**
4227 * Only updates the FPU status word (FSW) with the result of the current
4228 * instruction.
4229 *
4230 * @param pCtx The CPU context.
4231 * @param u16FSW The FSW output of the current instruction.
4232 */
4233static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
4234{
4235 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4236 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4237}
4238
4239
4240/**
4241 * Pops one item off the FPU stack if no pending exception prevents it.
4242 *
4243 * @param pCtx The CPU context.
4244 */
4245static void iemFpuMaybePopOne(PCPUMCTX pCtx)
4246{
4247 /* Check pending exceptions. */
4248 uint16_t uFSW = pCtx->fpu.FSW;
4249 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4250 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4251 return;
4252
4253 /* TOP--. */
4254 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4255 uFSW &= ~X86_FSW_TOP_MASK;
4256 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4257 pCtx->fpu.FSW = uFSW;
4258
4259 /* Mark the previous ST0 as empty. */
4260 iOldTop >>= X86_FSW_TOP_SHIFT;
4261 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
4262
4263 /* Rotate the registers. */
4264 iemFpuRotateStackPop(pCtx);
4265}
4266
4267
4268/**
4269 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4270 *
4271 * @param pIemCpu The IEM per CPU data.
4272 * @param pResult The FPU operation result to push.
4273 */
4274static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
4275{
4276 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4277 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4278 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4279}
4280
4281
4282/**
4283 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4284 * and sets FPUDP and FPUDS.
4285 *
4286 * @param pIemCpu The IEM per CPU data.
4287 * @param pResult The FPU operation result to push.
4288 * @param iEffSeg The effective segment register.
4289 * @param GCPtrEff The effective address relative to @a iEffSeg.
4290 */
4291static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4292{
4293 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4294 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4295 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4296 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4297}
4298
4299
4300/**
4301 * Replace ST0 with the first value and push the second onto the FPU stack,
4302 * unless a pending exception prevents it.
4303 *
4304 * @param pIemCpu The IEM per CPU data.
4305 * @param pResult The FPU operation result to store and push.
4306 */
4307static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
4308{
4309 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4310 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4311
4312 /* Update FSW and bail if there are pending exceptions afterwards. */
4313 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4314 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4315 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4316 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4317 {
4318 pCtx->fpu.FSW = fFsw;
4319 return;
4320 }
4321
4322 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4323 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4324 {
4325 /* All is fine, push the actual value. */
4326 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4327 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
4328 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
4329 }
4330 else if (pCtx->fpu.FCW & X86_FCW_IM)
4331 {
4332 /* Masked stack overflow, push QNaN. */
4333 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4334 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4335 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4336 }
4337 else
4338 {
4339 /* Raise stack overflow, don't push anything. */
4340 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4341 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4342 return;
4343 }
4344
4345 fFsw &= ~X86_FSW_TOP_MASK;
4346 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4347 pCtx->fpu.FSW = fFsw;
4348
4349 iemFpuRotateStackPush(pCtx);
4350}
4351
4352
4353/**
4354 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4355 * FOP.
4356 *
4357 * @param pIemCpu The IEM per CPU data.
4358 * @param pResult The result to store.
4359 * @param iStReg Which FPU register to store it in.
4361 */
4362static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4363{
4364 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4365 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4366 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4367}
4368
4369
4370/**
4371 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4372 * FOP, and then pops the stack.
4373 *
4374 * @param pIemCpu The IEM per CPU data.
4375 * @param pResult The result to store.
4376 * @param iStReg Which FPU register to store it in.
4378 */
4379static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4380{
4381 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4382 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4383 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4384 iemFpuMaybePopOne(pCtx);
4385}
4386
4387
4388/**
4389 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4390 * FPUDP, and FPUDS.
4391 *
4392 * @param pIemCpu The IEM per CPU data.
4393 * @param pResult The result to store.
4394 * @param iStReg Which FPU register to store it in.
4396 * @param iEffSeg The effective memory operand selector register.
4397 * @param GCPtrEff The effective memory operand offset.
4398 */
4399static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4400{
4401 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4402 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
4403 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4404 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4405}
4406
4407
4408/**
4409 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4410 * FPUDP, and FPUDS, and then pops the stack.
4411 *
4412 * @param pIemCpu The IEM per CPU data.
4413 * @param pResult The result to store.
4414 * @param iStReg Which FPU register to store it in.
4416 * @param iEffSeg The effective memory operand selector register.
4417 * @param GCPtrEff The effective memory operand offset.
4418 */
4419static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4420 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4421{
4422 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4423 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4424 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4425 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4426 iemFpuMaybePopOne(pCtx);
4427}
4428
4429
4430/**
4431 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4432 *
4433 * @param pIemCpu The IEM per CPU data.
4434 */
4435static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4436{
4437 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4438}
4439
4440
4441/**
4442 * Marks the specified stack register as free (for FFREE).
4443 *
4444 * @param pIemCpu The IEM per CPU data.
4445 * @param iStReg The register to free.
4446 */
4447static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4448{
4449 Assert(iStReg < 8);
4450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4451 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4452 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4453}
4454
4455
4456/**
4457 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4458 *
4459 * @param pIemCpu The IEM per CPU data.
4460 */
4461static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4462{
4463 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4464 uint16_t uFsw = pCtx->fpu.FSW;
4465 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4466 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4467 uFsw &= ~X86_FSW_TOP_MASK;
4468 uFsw |= uTop;
4469 pCtx->fpu.FSW = uFsw;
4470}
4471
4472
4473/**
4474 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4475 *
4476 * @param pIemCpu The IEM per CPU data.
4477 */
4478static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4479{
4480 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4481 uint16_t uFsw = pCtx->fpu.FSW;
4482 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4483 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4484 uFsw &= ~X86_FSW_TOP_MASK;
4485 uFsw |= uTop;
4486 pCtx->fpu.FSW = uFsw;
4487}
4488
4489
4490/**
4491 * Updates the FSW, FOP, FPUIP, and FPUCS.
4492 *
4493 * @param pIemCpu The IEM per CPU data.
4494 * @param u16FSW The FSW from the current instruction.
4495 */
4496static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4497{
4498 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4499 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4500 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4501}
4502
4503
4504/**
4505 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4506 *
4507 * @param pIemCpu The IEM per CPU data.
4508 * @param u16FSW The FSW from the current instruction.
4509 */
4510static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4511{
4512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4513 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4514 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4515 iemFpuMaybePopOne(pCtx);
4516}
4517
4518
4519/**
4520 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4521 *
4522 * @param pIemCpu The IEM per CPU data.
4523 * @param u16FSW The FSW from the current instruction.
4524 * @param iEffSeg The effective memory operand selector register.
4525 * @param GCPtrEff The effective memory operand offset.
4526 */
4527static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4528{
4529 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4530 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4531 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4532 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4533}
4534
4535
4536/**
4537 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4538 *
4539 * @param pIemCpu The IEM per CPU data.
4540 * @param u16FSW The FSW from the current instruction.
4541 */
4542static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4543{
4544 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4545 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4546 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4547 iemFpuMaybePopOne(pCtx);
4548 iemFpuMaybePopOne(pCtx);
4549}
4550
4551
4552/**
4553 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4554 *
4555 * @param pIemCpu The IEM per CPU data.
4556 * @param u16FSW The FSW from the current instruction.
4557 * @param iEffSeg The effective memory operand selector register.
4558 * @param GCPtrEff The effective memory operand offset.
4559 */
4560static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4561{
4562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4563 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4564 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4565 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4566 iemFpuMaybePopOne(pCtx);
4567}
4568
4569
4570/**
4571 * Worker routine for raising an FPU stack underflow exception.
4572 *
4573 * @param pIemCpu The IEM per CPU data.
4574 * @param iStReg The stack register being accessed.
4575 * @param pCtx The CPU context.
4576 */
4577static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4578{
4579 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4580 if (pCtx->fpu.FCW & X86_FCW_IM)
4581 {
4582 /* Masked underflow. */
4583 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4584 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4585 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4586 if (iStReg != UINT8_MAX)
4587 {
4588 pCtx->fpu.FTW |= RT_BIT(iReg);
4589 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4590 }
4591 }
4592 else
4593 {
4594 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4595 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4596 }
4597}
4598
4599
4600/**
4601 * Raises a FPU stack underflow exception.
4602 *
4603 * @param pIemCpu The IEM per CPU data.
4604 * @param iStReg The destination register that should be loaded
4605 * with QNaN if \#IS is masked. Specify
4606 * UINT8_MAX if none (like for fcom).
4607 */
4608DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4609{
4610 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4611 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4612 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4613}
4614
4615
4616DECL_NO_INLINE(static, void)
4617iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4618{
4619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4620 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4621 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4622 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4623}
4624
4625
4626DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4627{
4628 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4629 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4630 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4631 iemFpuMaybePopOne(pCtx);
4632}
4633
4634
4635DECL_NO_INLINE(static, void)
4636iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4637{
4638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4639 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4640 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4641 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4642 iemFpuMaybePopOne(pCtx);
4643}
4644
4645
4646DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4647{
4648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4649 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4650 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4651 iemFpuMaybePopOne(pCtx);
4652 iemFpuMaybePopOne(pCtx);
4653}
4654
4655
4656DECL_NO_INLINE(static, void)
4657iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4658{
4659 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4660 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4661
4662 if (pCtx->fpu.FCW & X86_FCW_IM)
4663 {
4664 /* Masked underflow - Push QNaN. */
4665 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4666 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4667 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4668 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4669 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4670 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4671 iemFpuRotateStackPush(pCtx);
4672 }
4673 else
4674 {
4675 /* Exception pending - don't change TOP or the register stack. */
4676 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4677 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4678 }
4679}
4680
4681
4682DECL_NO_INLINE(static, void)
4683iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4684{
4685 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4686 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4687
4688 if (pCtx->fpu.FCW & X86_FCW_IM)
4689 {
4690 /* Masked underflow - Push QNaN. */
4691 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4692 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4693 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4694 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4695 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4696 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4697 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4698 iemFpuRotateStackPush(pCtx);
4699 }
4700 else
4701 {
4702 /* Exception pending - don't change TOP or the register stack. */
4703 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4704 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4705 }
4706}
4707
4708
4709/**
4710 * Worker routine for raising an FPU stack overflow exception on a push.
4711 *
4712 * @param pIemCpu The IEM per CPU data.
4713 * @param pCtx The CPU context.
4714 */
4715static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4716{
4717 if (pCtx->fpu.FCW & X86_FCW_IM)
4718 {
4719 /* Masked overflow. */
4720 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4721 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4722 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4723 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4724 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4725 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4726 iemFpuRotateStackPush(pCtx);
4727 }
4728 else
4729 {
4730 /* Exception pending - don't change TOP or the register stack. */
4731 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4732 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4733 }
4734}
4735
4736
4737/**
4738 * Raises a FPU stack overflow exception on a push.
4739 *
4740 * @param pIemCpu The IEM per CPU data.
4741 */
4742DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4743{
4744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4745 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4746 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4747}
4748
4749
4750/**
4751 * Raises a FPU stack overflow exception on a push with a memory operand.
4752 *
4753 * @param pIemCpu The IEM per CPU data.
4754 * @param iEffSeg The effective memory operand selector register.
4755 * @param GCPtrEff The effective memory operand offset.
4756 */
4757DECL_NO_INLINE(static, void)
4758iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4759{
4760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4761 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4762 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4763 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4764}
4765
4766
4767static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4768{
4769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4770 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4771 if (pCtx->fpu.FTW & RT_BIT(iReg))
4772 return VINF_SUCCESS;
4773 return VERR_NOT_FOUND;
4774}
4775
4776
4777static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4778{
4779 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4780 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4781 if (pCtx->fpu.FTW & RT_BIT(iReg))
4782 {
4783 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4784 return VINF_SUCCESS;
4785 }
4786 return VERR_NOT_FOUND;
4787}
4788
4789
4790static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4791 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4792{
4793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4794 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4795 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4796 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4797 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4798 {
4799 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4800 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4801 return VINF_SUCCESS;
4802 }
4803 return VERR_NOT_FOUND;
4804}
4805
4806
4807static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4808{
4809 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4810 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4811 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4812 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4813 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4814 {
4815 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4816 return VINF_SUCCESS;
4817 }
4818 return VERR_NOT_FOUND;
4819}
4820
4821
4822/**
4823 * Updates the FPU exception status after FCW is changed.
4824 *
4825 * @param pCtx The CPU context.
4826 */
4827static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4828{
4829 uint16_t u16Fsw = pCtx->fpu.FSW;
4830 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4831 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4832 else
4833 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4834 pCtx->fpu.FSW = u16Fsw;
4835}
4836
4837
4838/**
4839 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4840 *
4841 * @returns The full FTW.
4842 * @param pCtx The CPU state.
4843 */
4844static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4845{
4846 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4847 uint16_t u16Ftw = 0;
4848 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4849 for (unsigned iSt = 0; iSt < 8; iSt++)
4850 {
4851 unsigned const iReg = (iSt + iTop) & 7;
4852 if (!(u8Ftw & RT_BIT(iReg)))
4853 u16Ftw |= 3 << (iReg * 2); /* empty */
4854 else
4855 {
4856 uint16_t uTag;
4857 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4858 if (pr80Reg->s.uExponent == 0x7fff)
4859 uTag = 2; /* Exponent is all 1's => Special. */
4860 else if (pr80Reg->s.uExponent == 0x0000)
4861 {
4862 if (pr80Reg->s.u64Mantissa == 0x0000)
4863 uTag = 1; /* All bits are zero => Zero. */
4864 else
4865 uTag = 2; /* Must be special. */
4866 }
4867 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4868 uTag = 0; /* Valid. */
4869 else
4870 uTag = 2; /* Must be special. */
4871
4872 u16Ftw |= uTag << (iReg * 2);
4873 }
4874 }
4875
4876 return u16Ftw;
4877}
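
/* Worked example: with TOP=6 and only ST(0) holding the valid value 1.0,
   physical register (0+6)&7 = 6 gets tag 00 (valid) while all other tag
   pairs are 11 (empty), giving a full FTW of 0xcfff. */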
4878
4879
4880/**
4881 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4882 *
4883 * @returns The compressed FTW.
4884 * @param u16FullFtw The full FTW to convert.
4885 */
4886static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4887{
4888 uint8_t u8Ftw = 0;
4889 for (unsigned i = 0; i < 8; i++)
4890 {
4891 if ((u16FullFtw & 3) != 3 /*empty*/)
4892 u8Ftw |= RT_BIT(i);
4893 u16FullFtw >>= 2;
4894 }
4895
4896 return u8Ftw;
4897}
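
/* Worked example: the full FTW 0xcfff from the example above (only physical
   register 6 non-empty) compresses to 0x40, since every tag pair other than
   11 simply sets the corresponding FTW bit. */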
4898
4899/** @} */
4900
4901
4902/** @name Memory access.
4903 *
4904 * @{
4905 */
4906
4907
4908/**
4909 * Updates the IEMCPU::cbWritten counter if applicable.
4910 *
4911 * @param pIemCpu The IEM per CPU data.
4912 * @param fAccess The access being accounted for.
4913 * @param cbMem The access size.
4914 */
4915DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4916{
4917 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4918 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4919 pIemCpu->cbWritten += (uint32_t)cbMem;
4920}
4921
4922
4923/**
4924 * Checks if the given segment can be written to, raising the appropriate
4925 * exception if not.
4926 *
4927 * @returns VBox strict status code.
4928 *
4929 * @param pIemCpu The IEM per CPU data.
4930 * @param pHid Pointer to the hidden register.
4931 * @param iSegReg The register number.
4932 * @param pu64BaseAddr Where to return the base address to use for the
4933 * segment. (In 64-bit code it may differ from the
4934 * base in the hidden segment.)
4935 */
4936static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
4937{
4938 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4939 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4940 else
4941 {
4942 if (!pHid->Attr.n.u1Present)
4943 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4944
4945 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4946 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4947 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4948 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4949 *pu64BaseAddr = pHid->u64Base;
4950 }
4951 return VINF_SUCCESS;
4952}
4953
4954
4955/**
4956 * Checks if the given segment can be read from, raising the appropriate
4957 * exception if not.
4958 *
4959 * @returns VBox strict status code.
4960 *
4961 * @param pIemCpu The IEM per CPU data.
4962 * @param pHid Pointer to the hidden register.
4963 * @param iSegReg The register number.
4964 * @param pu64BaseAddr Where to return the base address to use for the
4965 * segment. (In 64-bit code it may differ from the
4966 * base in the hidden segment.)
4967 */
4968static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
4969{
4970 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4971 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4972 else
4973 {
4974 if (!pHid->Attr.n.u1Present)
4975 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4976
4977 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4978 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4979 *pu64BaseAddr = pHid->u64Base;
4980 }
4981 return VINF_SUCCESS;
4982}
4983
4984
4985/**
4986 * Applies the segment limit, base and attributes.
4987 *
4988 * This may raise a \#GP or \#SS.
4989 *
4990 * @returns VBox strict status code.
4991 *
4992 * @param pIemCpu The IEM per CPU data.
4993 * @param fAccess The kind of access which is being performed.
4994 * @param iSegReg The index of the segment register to apply.
4995 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4996 * TSS, ++).
 * @param cbMem The size of the access in bytes.
4997 * @param pGCPtrMem Pointer to the guest memory address to apply
4998 * segmentation to. Input and output parameter.
4999 */
5000static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
5001 size_t cbMem, PRTGCPTR pGCPtrMem)
5002{
5003 if (iSegReg == UINT8_MAX)
5004 return VINF_SUCCESS;
5005
5006 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
5007 switch (pIemCpu->enmCpuMode)
5008 {
5009 case IEMMODE_16BIT:
5010 case IEMMODE_32BIT:
5011 {
5012 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5013 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5014
5015 Assert(pSel->Attr.n.u1Present);
5016 Assert(pSel->Attr.n.u1DescType);
5017 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5018 {
5019 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5020 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5021 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
5022
5023 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5024 {
5025 /** @todo CPL check. */
5026 }
5027
5028 /*
5029 * There are two kinds of data selectors, normal and expand down.
5030 */
5031 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5032 {
5033 if ( GCPtrFirst32 > pSel->u32Limit
5034 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5035 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5036 }
5037 else
5038 {
5039 /*
5040 * The upper boundary is defined by the B bit, not the G bit!
5041 */
5042 if ( GCPtrFirst32 < pSel->u32Limit + 1
5043 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? 0xFFFFFFFF : 0xFFFF))
5044 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5045
5046 }
5047 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5048 }
5049 else
5050 {
5051
5052 /*
5053 * Code selectors can usually be used to read thru, while writing is
5054 * only permitted in real and V8086 mode.
5055 */
5056 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5057 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5058 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5059 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
5060 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
5061
5062 if ( GCPtrFirst32 > pSel->u32Limit
5063 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5064 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5065
5066 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5067 {
5068 /** @todo CPL check. */
5069 }
5070
5071 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5072 }
5073 return VINF_SUCCESS;
5074 }
5075
5076 case IEMMODE_64BIT:
5077 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5078 *pGCPtrMem += pSel->u64Base;
5079 return VINF_SUCCESS;
5080
5081 default:
5082 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
5083 }
5084}
5085
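/*
 * A worked example of the expand-down limit check above (illustrative values
 * only): assume a 32-bit data segment with u32Limit=0xfff and the B
 * (u1DefBig) bit set, so the valid offsets are 0x1000 thru 0xffffffff,
 * i.e. everything above the limit:
 *
 *      GCPtrFirst32=0x0800 cbMem=4: GCPtrFirst32 < u32Limit + 1 -> iemRaiseSelectorBounds.
 *      GCPtrFirst32=0x1000 cbMem=4: both first and last byte are in range -> OK, base is added.
 *
 * With the B bit clear the upper bound shrinks to 0xffff, so a dword access
 * at 0xfffe would fail the GCPtrLast32 check instead.
 */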
5086
5087/**
5088 * Translates a virtual address to a physical address and checks if we
5089 * can access the page as specified.
5090 *
5091 * @param pIemCpu The IEM per CPU data.
5092 * @param GCPtrMem The virtual address.
5093 * @param fAccess The intended access.
5094 * @param pGCPhysMem Where to return the physical address.
5095 */
5096static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
5097 PRTGCPHYS pGCPhysMem)
5098{
5099 /** @todo Need a different PGM interface here. We're currently using
5100 * generic / REM interfaces. This won't cut it for R0 & RC. */
5101 RTGCPHYS GCPhys;
5102 uint64_t fFlags;
5103 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
5104 if (RT_FAILURE(rc))
5105 {
5106 /** @todo Check unassigned memory in unpaged mode. */
5107 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5108 *pGCPhysMem = NIL_RTGCPHYS;
5109 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
5110 }
5111
5112 /* If the page is writable and does not have the no-exec bit set, all
5113 access is allowed. Otherwise we'll have to check more carefully... */
5114 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5115 {
5116 /* Write to read only memory? */
5117 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5118 && !(fFlags & X86_PTE_RW)
5119 && ( pIemCpu->uCpl != 0
5120 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
5121 {
5122 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5123 *pGCPhysMem = NIL_RTGCPHYS;
5124 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5125 }
5126
5127 /* Kernel memory accessed by userland? */
5128 if ( !(fFlags & X86_PTE_US)
5129 && pIemCpu->uCpl == 3
5130 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5131 {
5132 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5133 *pGCPhysMem = NIL_RTGCPHYS;
5134 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5135 }
5136
5137 /* Executing non-executable memory? */
5138 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5139 && (fFlags & X86_PTE_PAE_NX)
5140 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
5141 {
5142 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5143 *pGCPhysMem = NIL_RTGCPHYS;
5144 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5145 VERR_ACCESS_DENIED);
5146 }
5147 }
5148
5149 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
5150 *pGCPhysMem = GCPhys;
5151 return VINF_SUCCESS;
5152}
5153
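/*
 * To summarize the checks above with a couple of illustrative cases: a ring-3
 * write to a present, read-only user page faults regardless of CR0.WP, a
 * ring-0 write to the same page only faults when CR0.WP is set, and a ring-3
 * access to a supervisor page (U/S clear) faults unless the access is marked
 * IEM_ACCESS_WHAT_SYS.  Instruction fetches additionally respect the NX bit
 * when EFER.NXE is set.
 */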
5154
5155
5156/**
5157 * Maps a physical page.
5158 *
5159 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
5160 * @param pIemCpu The IEM per CPU data.
5161 * @param GCPhysMem The physical address.
5162 * @param fAccess The intended access.
5163 * @param ppvMem Where to return the mapping address.
5164 * @param pLock The PGM lock.
5165 */
5166static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
5167{
5168#ifdef IEM_VERIFICATION_MODE_FULL
5169 /* Force the alternative path so we can ignore writes. */
5170 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
5171 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5172#endif
5173#ifdef IEM_LOG_MEMORY_WRITES
5174 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5175 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5176#endif
5177#ifdef IEM_VERIFICATION_MODE_MINIMAL
5178 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5179#endif
5180
5181 /** @todo This API may require some improving later. A private deal with PGM
5182 * regarding locking and unlocking needs to be struck. A couple of TLBs
5183 * living in PGM, but with publicly accessible inlined access methods
5184 * could perhaps be an even better solution. */
5185 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
5186 GCPhysMem,
5187 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
5188 pIemCpu->fBypassHandlers,
5189 ppvMem,
5190 pLock);
5191 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
5192 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5193 return rc;
5194}
5195
5196
5197/**
5198 * Unmap a page previously mapped by iemMemPageMap.
5199 *
5200 * @param pIemCpu The IEM per CPU data.
5201 * @param GCPhysMem The physical address.
5202 * @param fAccess The intended access.
5203 * @param pvMem What iemMemPageMap returned.
5204 * @param pLock The PGM lock.
5205 */
5206DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
5207{
5208 NOREF(pIemCpu);
5209 NOREF(GCPhysMem);
5210 NOREF(fAccess);
5211 NOREF(pvMem);
5212 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
5213}
5214
5215
5216/**
5217 * Looks up a memory mapping entry.
5218 *
5219 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
5220 * @param pIemCpu The IEM per CPU data.
5221 * @param pvMem The memory address.
5222 * @param fAccess The access kind to look up.
5223 */
5224DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5225{
5226 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5227 if ( pIemCpu->aMemMappings[0].pv == pvMem
5228 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5229 return 0;
5230 if ( pIemCpu->aMemMappings[1].pv == pvMem
5231 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5232 return 1;
5233 if ( pIemCpu->aMemMappings[2].pv == pvMem
5234 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5235 return 2;
5236 return VERR_NOT_FOUND;
5237}
5238
5239
5240/**
5241 * Finds a free memmap entry when using iNextMapping doesn't work.
5242 *
5243 * @returns Memory mapping index, 1024 on failure.
5244 * @param pIemCpu The IEM per CPU data.
5245 */
5246static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
5247{
5248 /*
5249 * The easy case.
5250 */
5251 if (pIemCpu->cActiveMappings == 0)
5252 {
5253 pIemCpu->iNextMapping = 1;
5254 return 0;
5255 }
5256
5257 /* There should be enough mappings for all instructions. */
5258 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
5259
5260 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
5261 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5262 return i;
5263
5264 AssertFailedReturn(1024);
5265}
5266
5267
5268/**
5269 * Commits a bounce buffer that needs writing back and unmaps it.
5270 *
5271 * @returns Strict VBox status code.
5272 * @param pIemCpu The IEM per CPU data.
5273 * @param iMemMap The index of the buffer to commit.
5274 */
5275static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
5276{
5277 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5278 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5279
5280 /*
5281 * Do the writing.
5282 */
5283 int rc;
5284#ifndef IEM_VERIFICATION_MODE_MINIMAL
5285 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
5286 && !IEM_VERIFICATION_ENABLED(pIemCpu))
5287 {
5288 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5289 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5290 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5291 if (!pIemCpu->fBypassHandlers)
5292 {
5293 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5294 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5295 pbBuf,
5296 cbFirst);
5297 if (cbSecond && rc == VINF_SUCCESS)
5298 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5299 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5300 pbBuf + cbFirst,
5301 cbSecond);
5302 }
5303 else
5304 {
5305 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5306 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5307 pbBuf,
5308 cbFirst);
5309 if (cbSecond && rc == VINF_SUCCESS)
5310 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5311 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5312 pbBuf + cbFirst,
5313 cbSecond);
5314 }
5315 if (rc != VINF_SUCCESS)
5316 {
5317 /** @todo status code handling */
5318 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5319 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
5320 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5321 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5322 }
5323 }
5324 else
5325#endif
5326 rc = VINF_SUCCESS;
5327
5328#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5329 /*
5330 * Record the write(s).
5331 */
5332 if (!pIemCpu->fNoRem)
5333 {
5334 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5335 if (pEvtRec)
5336 {
5337 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5338 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
5339 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5340 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
5341 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
5342 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5343 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5344 }
5345 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5346 {
5347 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5348 if (pEvtRec)
5349 {
5350 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5351 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
5352 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5353 memcpy(pEvtRec->u.RamWrite.ab,
5354 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
5355 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
5356 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5357 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5358 }
5359 }
5360 }
5361#endif
5362#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
5363 if (rc == VINF_SUCCESS)
5364 {
5365 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5366 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
5367 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5368 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5369 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
5370 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
5371
5372 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5373 g_cbIemWrote = cbWrote;
5374 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5375 }
5376#endif
5377
5378 /*
5379 * Free the mapping entry.
5380 */
5381 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5382 Assert(pIemCpu->cActiveMappings != 0);
5383 pIemCpu->cActiveMappings--;
5384 return rc;
5385}
5386
5387
5388/**
5389 * iemMemMap worker that deals with a request crossing pages.
5390 */
5391static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
5392 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5393{
5394 /*
5395 * Do the address translations.
5396 */
5397 RTGCPHYS GCPhysFirst;
5398 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5399 if (rcStrict != VINF_SUCCESS)
5400 return rcStrict;
5401
5402/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
5403 * last byte. */
5404 RTGCPHYS GCPhysSecond;
5405 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
5406 if (rcStrict != VINF_SUCCESS)
5407 return rcStrict;
5408 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
5409
5410 /*
5411 * Read in the current memory content if it's a read, execute or partial
5412 * write access.
5413 */
5414 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5415 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
5416 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5417
5418 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5419 {
5420 int rc;
5421 if (!pIemCpu->fBypassHandlers)
5422 {
5423 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
5424 if (rc != VINF_SUCCESS)
5425 {
5426 /** @todo status code handling */
5427 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5428 return rc;
5429 }
5430 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5431 if (rc != VINF_SUCCESS)
5432 {
5433 /** @todo status code handling */
5434 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5435 return rc;
5436 }
5437 }
5438 else
5439 {
5440 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5441 if (rc != VINF_SUCCESS)
5442 {
5443 /** @todo status code handling */
5444 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5445 return rc;
5446 }
5447 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5448 if (rc != VINF_SUCCESS)
5449 {
5450 /** @todo status code handling */
5451 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5452 return rc;
5453 }
5454 }
5455
5456#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5457 if ( !pIemCpu->fNoRem
5458 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5459 {
5460 /*
5461 * Record the reads.
5462 */
5463 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5464 if (pEvtRec)
5465 {
5466 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5467 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5468 pEvtRec->u.RamRead.cb = cbFirstPage;
5469 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5470 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5471 }
5472 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5473 if (pEvtRec)
5474 {
5475 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5476 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5477 pEvtRec->u.RamRead.cb = cbSecondPage;
5478 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5479 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5480 }
5481 }
5482#endif
5483 }
5484#ifdef VBOX_STRICT
5485 else
5486 memset(pbBuf, 0xcc, cbMem);
5487#endif
5488#ifdef VBOX_STRICT
5489 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5490 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5491#endif
5492
5493 /*
5494 * Commit the bounce buffer entry.
5495 */
5496 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5497 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5498 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5499 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5500 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5501 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5502 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5503 pIemCpu->cActiveMappings++;
5504
5505 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5506 *ppvMem = pbBuf;
5507 return VINF_SUCCESS;
5508}
5509
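/*
 * The split performed above is plain arithmetic; as an illustrative example,
 * a 4 byte access at an offset of 0xffe into a page (PAGE_SIZE 0x1000) gives:
 *
 *      cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
 *      cbSecondPage = cbMem - cbFirstPage                          = 4 - 2          = 2
 *
 * i.e. the first two bytes go to the end of the first page and the remaining
 * two to the start of the second page (GCPhysSecond is masked down to a page
 * boundary above).
 */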
5510
5511/**
5512 * iemMemMap worker that deals with iemMemPageMap failures.
5513 */
5514static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5515 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5516{
5517 /*
5518 * Filter out conditions we can handle and the ones which shouldn't happen.
5519 */
5520 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5521 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5522 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5523 {
5524 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5525 return rcMap;
5526 }
5527 pIemCpu->cPotentialExits++;
5528
5529 /*
5530 * Read in the current memory content if it's a read, execute or partial
5531 * write access.
5532 */
5533 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5534 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5535 {
5536 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5537 memset(pbBuf, 0xff, cbMem);
5538 else
5539 {
5540 int rc;
5541 if (!pIemCpu->fBypassHandlers)
5542 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5543 else
5544 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5545 if (rc != VINF_SUCCESS)
5546 {
5547 /** @todo status code handling */
5548 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5549 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5550 return rc;
5551 }
5552 }
5553
5554#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5555 if ( !pIemCpu->fNoRem
5556 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5557 {
5558 /*
5559 * Record the read.
5560 */
5561 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5562 if (pEvtRec)
5563 {
5564 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5565 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5566 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5567 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5568 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5569 }
5570 }
5571#endif
5572 }
5573#ifdef VBOX_STRICT
5574 else
5575 memset(pbBuf, 0xcc, cbMem);
5576#endif
5577#ifdef VBOX_STRICT
5578 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5579 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5580#endif
5581
5582 /*
5583 * Commit the bounce buffer entry.
5584 */
5585 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5586 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5587 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5588 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5589 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5590 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5591 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5592 pIemCpu->cActiveMappings++;
5593
5594 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5595 *ppvMem = pbBuf;
5596 return VINF_SUCCESS;
5597}
5598
5599
5600
5601/**
5602 * Maps the specified guest memory for the given kind of access.
5603 *
5604 * This may be using bounce buffering of the memory if it's crossing a page
5605 * boundary or if there is an access handler installed for any of it. Because
5606 * of lock prefix guarantees, we're in for some extra clutter when this
5607 * happens.
5608 *
5609 * This may raise a \#GP, \#SS, \#PF or \#AC.
5610 *
5611 * @returns VBox strict status code.
5612 *
5613 * @param pIemCpu The IEM per CPU data.
5614 * @param ppvMem Where to return the pointer to the mapped
5615 * memory.
5616 * @param cbMem The number of bytes to map. This is usually 1,
5617 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5618 * string operations it can be up to a page.
5619 * @param iSegReg The index of the segment register to use for
5620 * this access. The base and limits are checked.
5621 * Use UINT8_MAX to indicate that no segmentation
5622 * is required (for IDT, GDT and LDT accesses).
5623 * @param GCPtrMem The address of the guest memory.
5624 * @param fAccess How the memory is being accessed. The
5625 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5626 * how to map the memory, while the
5627 * IEM_ACCESS_WHAT_XXX bit is used when raising
5628 * exceptions.
5629 */
5630static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5631{
5632 /*
5633 * Check the input and figure out which mapping entry to use.
5634 */
5635 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */
5636 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5637
5638 unsigned iMemMap = pIemCpu->iNextMapping;
5639 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5640 {
5641 iMemMap = iemMemMapFindFree(pIemCpu);
5642 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5643 }
5644
5645 /*
5646 * Map the memory, checking that we can actually access it. If something
5647 * slightly complicated happens, fall back on bounce buffering.
5648 */
5649 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5650 if (rcStrict != VINF_SUCCESS)
5651 return rcStrict;
5652
5653 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5654 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5655
5656 RTGCPHYS GCPhysFirst;
5657 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5658 if (rcStrict != VINF_SUCCESS)
5659 return rcStrict;
5660
5661 void *pvMem;
5662 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5663 if (rcStrict != VINF_SUCCESS)
5664 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5665
5666 /*
5667 * Fill in the mapping table entry.
5668 */
5669 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5670 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5671 pIemCpu->iNextMapping = iMemMap + 1;
5672 pIemCpu->cActiveMappings++;
5673
5674 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5675 *ppvMem = pvMem;
5676 return VINF_SUCCESS;
5677}
5678
5679
5680/**
5681 * Commits the guest memory if bounce buffered and unmaps it.
5682 *
5683 * @returns Strict VBox status code.
5684 * @param pIemCpu The IEM per CPU data.
5685 * @param pvMem The mapping.
5686 * @param fAccess The kind of access.
5687 */
5688static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5689{
5690 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5691 AssertReturn(iMemMap >= 0, iMemMap);
5692
5693 /* If it's bounce buffered, we may need to write back the buffer. */
5694 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5695 {
5696 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5697 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5698 }
5699 /* Otherwise unlock it. */
5700 else
5701 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5702
5703 /* Free the entry. */
5704 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5705 Assert(pIemCpu->cActiveMappings != 0);
5706 pIemCpu->cActiveMappings--;
5707 return VINF_SUCCESS;
5708}
5709
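/*
 * The data fetch and store helpers below all follow the same lazy
 * map / access / commit pattern.  As a minimal sketch (GCPtrEff and u32Value
 * are hypothetical caller variables, not part of this file), a dword store
 * through DS looks like this:
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 *
 * Whether the returned pointer is a direct guest page mapping or a bounce
 * buffer is transparent to the caller; the commit call does the right thing
 * either way.
 */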
5710
5711/**
5712 * Fetches a data byte.
5713 *
5714 * @returns Strict VBox status code.
5715 * @param pIemCpu The IEM per CPU data.
5716 * @param pu8Dst Where to return the byte.
5717 * @param iSegReg The index of the segment register to use for
5718 * this access. The base and limits are checked.
5719 * @param GCPtrMem The address of the guest memory.
5720 */
5721static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5722{
5723 /* The lazy approach for now... */
5724 uint8_t const *pu8Src;
5725 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5726 if (rc == VINF_SUCCESS)
5727 {
5728 *pu8Dst = *pu8Src;
5729 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5730 }
5731 return rc;
5732}
5733
5734
5735/**
5736 * Fetches a data word.
5737 *
5738 * @returns Strict VBox status code.
5739 * @param pIemCpu The IEM per CPU data.
5740 * @param pu16Dst Where to return the word.
5741 * @param iSegReg The index of the segment register to use for
5742 * this access. The base and limits are checked.
5743 * @param GCPtrMem The address of the guest memory.
5744 */
5745static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5746{
5747 /* The lazy approach for now... */
5748 uint16_t const *pu16Src;
5749 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5750 if (rc == VINF_SUCCESS)
5751 {
5752 *pu16Dst = *pu16Src;
5753 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5754 }
5755 return rc;
5756}
5757
5758
5759/**
5760 * Fetches a data dword.
5761 *
5762 * @returns Strict VBox status code.
5763 * @param pIemCpu The IEM per CPU data.
5764 * @param pu32Dst Where to return the dword.
5765 * @param iSegReg The index of the segment register to use for
5766 * this access. The base and limits are checked.
5767 * @param GCPtrMem The address of the guest memory.
5768 */
5769static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5770{
5771 /* The lazy approach for now... */
5772 uint32_t const *pu32Src;
5773 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5774 if (rc == VINF_SUCCESS)
5775 {
5776 *pu32Dst = *pu32Src;
5777 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5778 }
5779 return rc;
5780}
5781
5782
5783#ifdef SOME_UNUSED_FUNCTION
5784/**
5785 * Fetches a data dword and sign extends it to a qword.
5786 *
5787 * @returns Strict VBox status code.
5788 * @param pIemCpu The IEM per CPU data.
5789 * @param pu64Dst Where to return the sign extended value.
5790 * @param iSegReg The index of the segment register to use for
5791 * this access. The base and limits are checked.
5792 * @param GCPtrMem The address of the guest memory.
5793 */
5794static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5795{
5796 /* The lazy approach for now... */
5797 int32_t const *pi32Src;
5798 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5799 if (rc == VINF_SUCCESS)
5800 {
5801 *pu64Dst = *pi32Src;
5802 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5803 }
5804#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5805 else
5806 *pu64Dst = 0;
5807#endif
5808 return rc;
5809}
5810#endif
5811
5812
5813/**
5814 * Fetches a data qword.
5815 *
5816 * @returns Strict VBox status code.
5817 * @param pIemCpu The IEM per CPU data.
5818 * @param pu64Dst Where to return the qword.
5819 * @param iSegReg The index of the segment register to use for
5820 * this access. The base and limits are checked.
5821 * @param GCPtrMem The address of the guest memory.
5822 */
5823static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5824{
5825 /* The lazy approach for now... */
5826 uint64_t const *pu64Src;
5827 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5828 if (rc == VINF_SUCCESS)
5829 {
5830 *pu64Dst = *pu64Src;
5831 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5832 }
5833 return rc;
5834}
5835
5836
5837/**
5838 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
5839 *
5840 * @returns Strict VBox status code.
5841 * @param pIemCpu The IEM per CPU data.
5842 * @param pu64Dst Where to return the qword.
5843 * @param iSegReg The index of the segment register to use for
5844 * this access. The base and limits are checked.
5845 * @param GCPtrMem The address of the guest memory.
5846 */
5847static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5848{
5849 /* The lazy approach for now... */
5850 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
5851 if (RT_UNLIKELY(GCPtrMem & 15))
5852 return iemRaiseGeneralProtectionFault0(pIemCpu);
5853
5854 uint64_t const *pu64Src;
5855 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5856 if (rc == VINF_SUCCESS)
5857 {
5858 *pu64Dst = *pu64Src;
5859 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5860 }
5861 return rc;
5862}
5863
5864
5865/**
5866 * Fetches a data tword.
5867 *
5868 * @returns Strict VBox status code.
5869 * @param pIemCpu The IEM per CPU data.
5870 * @param pr80Dst Where to return the tword.
5871 * @param iSegReg The index of the segment register to use for
5872 * this access. The base and limits are checked.
5873 * @param GCPtrMem The address of the guest memory.
5874 */
5875static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5876{
5877 /* The lazy approach for now... */
5878 PCRTFLOAT80U pr80Src;
5879 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5880 if (rc == VINF_SUCCESS)
5881 {
5882 *pr80Dst = *pr80Src;
5883 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5884 }
5885 return rc;
5886}
5887
5888
5889/**
5890 * Fetches a data dqword (double qword), generally SSE related.
5891 *
5892 * @returns Strict VBox status code.
5893 * @param pIemCpu The IEM per CPU data.
5894 * @param pu128Dst Where to return the dqword.
5895 * @param iSegReg The index of the segment register to use for
5896 * this access. The base and limits are checked.
5897 * @param GCPtrMem The address of the guest memory.
5898 */
5899static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5900{
5901 /* The lazy approach for now... */
5902 uint128_t const *pu128Src;
5903 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5904 if (rc == VINF_SUCCESS)
5905 {
5906 *pu128Dst = *pu128Src;
5907 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
5908 }
5909 return rc;
5910}
5911
5912
5913/**
5914 * Fetches a data dqword (double qword) at an aligned address, generally SSE
5915 * related.
5916 *
5917 * Raises GP(0) if not aligned.
5918 *
5919 * @returns Strict VBox status code.
5920 * @param pIemCpu The IEM per CPU data.
5921 * @param pu128Dst Where to return the dqword.
5922 * @param iSegReg The index of the segment register to use for
5923 * this access. The base and limits are checked.
5924 * @param GCPtrMem The address of the guest memory.
5925 */
5926static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5927{
5928 /* The lazy approach for now... */
5929 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
5930 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
5931 return iemRaiseGeneralProtectionFault0(pIemCpu);
5932
5933 uint128_t const *pu128Src;
5934 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5935 if (rc == VINF_SUCCESS)
5936 {
5937 *pu128Dst = *pu128Src;
5938 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
5939 }
5940 return rc;
5941}
5942
5943
5944
5945
5946/**
5947 * Fetches a descriptor register (lgdt, lidt).
5948 *
5949 * @returns Strict VBox status code.
5950 * @param pIemCpu The IEM per CPU data.
5951 * @param pcbLimit Where to return the limit.
5952 * @param pGCPtrBase Where to return the base.
5953 * @param iSegReg The index of the segment register to use for
5954 * this access. The base and limits are checked.
5955 * @param GCPtrMem The address of the guest memory.
5956 * @param enmOpSize The effective operand size.
5957 */
5958static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5959 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5960{
5961 uint8_t const *pu8Src;
5962 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5963 (void **)&pu8Src,
5964 enmOpSize == IEMMODE_64BIT
5965 ? 2 + 8
5966 : enmOpSize == IEMMODE_32BIT
5967 ? 2 + 4
5968 : 2 + 3,
5969 iSegReg,
5970 GCPtrMem,
5971 IEM_ACCESS_DATA_R);
5972 if (rcStrict == VINF_SUCCESS)
5973 {
5974 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5975 switch (enmOpSize)
5976 {
5977 case IEMMODE_16BIT:
5978 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5979 break;
5980 case IEMMODE_32BIT:
5981 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5982 break;
5983 case IEMMODE_64BIT:
5984 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5985 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5986 break;
5987
5988 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5989 }
5990 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5991 }
5992 return rcStrict;
5993}
5994
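/*
 * For reference, the lgdt/lidt source operand decoded above is a little
 * endian pseudo-descriptor:
 *
 *      bytes 0..1: 16-bit table limit
 *      bytes 2..5: 32-bit base address (with a 16-bit operand size only
 *                  bits 0..23 are used, hence the hardcoded zero above)
 *      bytes 2..9: 64-bit base address when the operand size is 64-bit
 */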
5995
5996
5997/**
5998 * Stores a data byte.
5999 *
6000 * @returns Strict VBox status code.
6001 * @param pIemCpu The IEM per CPU data.
6002 * @param iSegReg The index of the segment register to use for
6003 * this access. The base and limits are checked.
6004 * @param GCPtrMem The address of the guest memory.
6005 * @param u8Value The value to store.
6006 */
6007static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
6008{
6009 /* The lazy approach for now... */
6010 uint8_t *pu8Dst;
6011 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6012 if (rc == VINF_SUCCESS)
6013 {
6014 *pu8Dst = u8Value;
6015 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
6016 }
6017 return rc;
6018}
6019
6020
6021/**
6022 * Stores a data word.
6023 *
6024 * @returns Strict VBox status code.
6025 * @param pIemCpu The IEM per CPU data.
6026 * @param iSegReg The index of the segment register to use for
6027 * this access. The base and limits are checked.
6028 * @param GCPtrMem The address of the guest memory.
6029 * @param u16Value The value to store.
6030 */
6031static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
6032{
6033 /* The lazy approach for now... */
6034 uint16_t *pu16Dst;
6035 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6036 if (rc == VINF_SUCCESS)
6037 {
6038 *pu16Dst = u16Value;
6039 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
6040 }
6041 return rc;
6042}
6043
6044
6045/**
6046 * Stores a data dword.
6047 *
6048 * @returns Strict VBox status code.
6049 * @param pIemCpu The IEM per CPU data.
6050 * @param iSegReg The index of the segment register to use for
6051 * this access. The base and limits are checked.
6052 * @param GCPtrMem The address of the guest memory.
6053 * @param u32Value The value to store.
6054 */
6055static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
6056{
6057 /* The lazy approach for now... */
6058 uint32_t *pu32Dst;
6059 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6060 if (rc == VINF_SUCCESS)
6061 {
6062 *pu32Dst = u32Value;
6063 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
6064 }
6065 return rc;
6066}
6067
6068
6069/**
6070 * Stores a data qword.
6071 *
6072 * @returns Strict VBox status code.
6073 * @param pIemCpu The IEM per CPU data.
6074 * @param iSegReg The index of the segment register to use for
6075 * this access. The base and limits are checked.
6076 * @param GCPtrMem The address of the guest memory.
6077 * @param u64Value The value to store.
6078 */
6079static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
6080{
6081 /* The lazy approach for now... */
6082 uint64_t *pu64Dst;
6083 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6084 if (rc == VINF_SUCCESS)
6085 {
6086 *pu64Dst = u64Value;
6087 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
6088 }
6089 return rc;
6090}
6091
6092
6093/**
6094 * Stores a data dqword.
6095 *
6096 * @returns Strict VBox status code.
6097 * @param pIemCpu The IEM per CPU data.
6098 * @param iSegReg The index of the segment register to use for
6099 * this access. The base and limits are checked.
6100 * @param GCPtrMem The address of the guest memory.
6101 * @param u128Value The value to store.
6102 */
6103static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6104{
6105 /* The lazy approach for now... */
6106 uint128_t *pu128Dst;
6107 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6108 if (rc == VINF_SUCCESS)
6109 {
6110 *pu128Dst = u128Value;
6111 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6112 }
6113 return rc;
6114}
6115
6116
6117/**
6118 * Stores a data dqword, SSE aligned.
6119 *
6120 * @returns Strict VBox status code.
6121 * @param pIemCpu The IEM per CPU data.
6122 * @param iSegReg The index of the segment register to use for
6123 * this access. The base and limits are checked.
6124 * @param GCPtrMem The address of the guest memory.
6125 * @param u128Value The value to store.
6126 */
6127static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6128{
6129 /* The lazy approach for now... */
6130 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6131 return iemRaiseGeneralProtectionFault0(pIemCpu);
6132
6133 uint128_t *pu128Dst;
6134 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6135 if (rc == VINF_SUCCESS)
6136 {
6137 *pu128Dst = u128Value;
6138 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6139 }
6140 return rc;
6141}
6142
6143
6144/**
6145 * Stores a descriptor register (sgdt, sidt).
6146 *
6147 * @returns Strict VBox status code.
6148 * @param pIemCpu The IEM per CPU data.
6149 * @param cbLimit The limit.
6150 * @param GCPtrBase The base address.
6151 * @param iSegReg The index of the segment register to use for
6152 * this access. The base and limits are checked.
6153 * @param GCPtrMem The address of the guest memory.
6154 * @param enmOpSize The effective operand size.
6155 */
6156static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
6157 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
6158{
6159 uint8_t *pu8Src;
6160 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
6161 (void **)&pu8Src,
6162 enmOpSize == IEMMODE_64BIT
6163 ? 2 + 8
6164 : enmOpSize == IEMMODE_32BIT
6165 ? 2 + 4
6166 : 2 + 3,
6167 iSegReg,
6168 GCPtrMem,
6169 IEM_ACCESS_DATA_W);
6170 if (rcStrict == VINF_SUCCESS)
6171 {
6172 pu8Src[0] = RT_BYTE1(cbLimit);
6173 pu8Src[1] = RT_BYTE2(cbLimit);
6174 pu8Src[2] = RT_BYTE1(GCPtrBase);
6175 pu8Src[3] = RT_BYTE2(GCPtrBase);
6176 pu8Src[4] = RT_BYTE3(GCPtrBase);
6177 if (enmOpSize == IEMMODE_16BIT)
6178 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
6179 else
6180 {
6181 pu8Src[5] = RT_BYTE4(GCPtrBase);
6182 if (enmOpSize == IEMMODE_64BIT)
6183 {
6184 pu8Src[6] = RT_BYTE5(GCPtrBase);
6185 pu8Src[7] = RT_BYTE6(GCPtrBase);
6186 pu8Src[8] = RT_BYTE7(GCPtrBase);
6187 pu8Src[9] = RT_BYTE8(GCPtrBase);
6188 }
6189 }
6190 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
6191 }
6192 return rcStrict;
6193}
6194
6195
6196/**
6197 * Pushes a word onto the stack.
6198 *
6199 * @returns Strict VBox status code.
6200 * @param pIemCpu The IEM per CPU data.
6201 * @param u16Value The value to push.
6202 */
6203static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
6204{
6205 /* Decrement the stack pointer. */
6206 uint64_t uNewRsp;
6207 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6208 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
6209
6210 /* Write the word the lazy way. */
6211 uint16_t *pu16Dst;
6212 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6213 if (rc == VINF_SUCCESS)
6214 {
6215 *pu16Dst = u16Value;
6216 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6217 }
6218
6219 /* Commit the new RSP value unless an access handler made trouble. */
6220 if (rc == VINF_SUCCESS)
6221 pCtx->rsp = uNewRsp;
6222
6223 return rc;
6224}
6225
6226
6227/**
6228 * Pushes a dword onto the stack.
6229 *
6230 * @returns Strict VBox status code.
6231 * @param pIemCpu The IEM per CPU data.
6232 * @param u32Value The value to push.
6233 */
6234static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
6235{
6236 /* Decrement the stack pointer. */
6237 uint64_t uNewRsp;
6238 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6239 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
6240
6241 /* Write the word the lazy way. */
6242 uint32_t *pu32Dst;
6243 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6244 if (rc == VINF_SUCCESS)
6245 {
6246 *pu32Dst = u32Value;
6247 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6248 }
6249
6250 /* Commit the new RSP value unless an access handler made trouble. */
6251 if (rc == VINF_SUCCESS)
6252 pCtx->rsp = uNewRsp;
6253
6254 return rc;
6255}
6256
6257
6258/**
6259 * Pushes a qword onto the stack.
6260 *
6261 * @returns Strict VBox status code.
6262 * @param pIemCpu The IEM per CPU data.
6263 * @param u64Value The value to push.
6264 */
6265static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
6266{
6267 /* Decrement the stack pointer. */
6268 uint64_t uNewRsp;
6269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6270 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
6271
6272 /* Write the word the lazy way. */
6273 uint64_t *pu64Dst;
6274 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6275 if (rc == VINF_SUCCESS)
6276 {
6277 *pu64Dst = u64Value;
6278 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6279 }
6280
6281 /* Commit the new RSP value unless an access handler made trouble. */
6282 if (rc == VINF_SUCCESS)
6283 pCtx->rsp = uNewRsp;
6284
6285 return rc;
6286}
6287
6288
6289/**
6290 * Pops a word from the stack.
6291 *
6292 * @returns Strict VBox status code.
6293 * @param pIemCpu The IEM per CPU data.
6294 * @param pu16Value Where to store the popped value.
6295 */
6296static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
6297{
6298 /* Increment the stack pointer. */
6299 uint64_t uNewRsp;
6300 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6301 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
6302
6303 /* Read the word the lazy way. */
6304 uint16_t const *pu16Src;
6305 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6306 if (rc == VINF_SUCCESS)
6307 {
6308 *pu16Value = *pu16Src;
6309 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6310
6311 /* Commit the new RSP value. */
6312 if (rc == VINF_SUCCESS)
6313 pCtx->rsp = uNewRsp;
6314 }
6315
6316 return rc;
6317}
6318
6319
6320/**
6321 * Pops a dword from the stack.
6322 *
6323 * @returns Strict VBox status code.
6324 * @param pIemCpu The IEM per CPU data.
6325 * @param pu32Value Where to store the popped value.
6326 */
6327static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
6328{
6329 /* Increment the stack pointer. */
6330 uint64_t uNewRsp;
6331 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6332 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
6333
6334 /* Read the dword the lazy way. */
6335 uint32_t const *pu32Src;
6336 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6337 if (rc == VINF_SUCCESS)
6338 {
6339 *pu32Value = *pu32Src;
6340 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6341
6342 /* Commit the new RSP value. */
6343 if (rc == VINF_SUCCESS)
6344 pCtx->rsp = uNewRsp;
6345 }
6346
6347 return rc;
6348}
6349
6350
6351/**
6352 * Pops a qword from the stack.
6353 *
6354 * @returns Strict VBox status code.
6355 * @param pIemCpu The IEM per CPU data.
6356 * @param pu64Value Where to store the popped value.
6357 */
6358static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
6359{
6360 /* Increment the stack pointer. */
6361 uint64_t uNewRsp;
6362 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6363 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
6364
6365 /* Read the qword the lazy way. */
6366 uint64_t const *pu64Src;
6367 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6368 if (rc == VINF_SUCCESS)
6369 {
6370 *pu64Value = *pu64Src;
6371 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6372
6373 /* Commit the new RSP value. */
6374 if (rc == VINF_SUCCESS)
6375 pCtx->rsp = uNewRsp;
6376 }
6377
6378 return rc;
6379}
6380
6381
6382/**
6383 * Pushes a word onto the stack, using a temporary stack pointer.
6384 *
6385 * @returns Strict VBox status code.
6386 * @param pIemCpu The IEM per CPU data.
6387 * @param u16Value The value to push.
6388 * @param pTmpRsp Pointer to the temporary stack pointer.
6389 */
6390static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
6391{
6392 /* Decrement the stack pointer. */
6393 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6394 RTUINT64U NewRsp = *pTmpRsp;
6395 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
6396
6397 /* Write the word the lazy way. */
6398 uint16_t *pu16Dst;
6399 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6400 if (rc == VINF_SUCCESS)
6401 {
6402 *pu16Dst = u16Value;
6403 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6404 }
6405
6406 /* Commit the new RSP value unless an access handler made trouble. */
6407 if (rc == VINF_SUCCESS)
6408 *pTmpRsp = NewRsp;
6409
6410 return rc;
6411}
6412
6413
6414/**
6415 * Pushes a dword onto the stack, using a temporary stack pointer.
6416 *
6417 * @returns Strict VBox status code.
6418 * @param pIemCpu The IEM per CPU data.
6419 * @param u32Value The value to push.
6420 * @param pTmpRsp Pointer to the temporary stack pointer.
6421 */
6422static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
6423{
6424 /* Decrement the stack pointer. */
6425 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6426 RTUINT64U NewRsp = *pTmpRsp;
6427 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
6428
6429 /* Write the word the lazy way. */
6430 uint32_t *pu32Dst;
6431 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6432 if (rc == VINF_SUCCESS)
6433 {
6434 *pu32Dst = u32Value;
6435 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6436 }
6437
6438 /* Commit the new RSP value unless an access handler made trouble. */
6439 if (rc == VINF_SUCCESS)
6440 *pTmpRsp = NewRsp;
6441
6442 return rc;
6443}
6444
6445
6446/**
6447 * Pushes a qword onto the stack, using a temporary stack pointer.
6448 *
6449 * @returns Strict VBox status code.
6450 * @param pIemCpu The IEM per CPU data.
6451 * @param u64Value The value to push.
6452 * @param pTmpRsp Pointer to the temporary stack pointer.
6453 */
6454static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
6455{
6456 /* Decrement the stack pointer. */
6457 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6458 RTUINT64U NewRsp = *pTmpRsp;
6459 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
6460
6461 /* Write the word the lazy way. */
6462 uint64_t *pu64Dst;
6463 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6464 if (rc == VINF_SUCCESS)
6465 {
6466 *pu64Dst = u64Value;
6467 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6468 }
6469
6470 /* Commit the new RSP value unless an access handler made trouble. */
6471 if (rc == VINF_SUCCESS)
6472 *pTmpRsp = NewRsp;
6473
6474 return rc;
6475}
6476
6477
6478/**
6479 * Pops a word from the stack, using a temporary stack pointer.
6480 *
6481 * @returns Strict VBox status code.
6482 * @param pIemCpu The IEM per CPU data.
6483 * @param pu16Value Where to store the popped value.
6484 * @param pTmpRsp Pointer to the temporary stack pointer.
6485 */
6486static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
6487{
6488 /* Increment the stack pointer. */
6489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6490 RTUINT64U NewRsp = *pTmpRsp;
6491 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
6492
6493 /* Read the word the lazy way. */
6494 uint16_t const *pu16Src;
6495 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6496 if (rc == VINF_SUCCESS)
6497 {
6498 *pu16Value = *pu16Src;
6499 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6500
6501 /* Commit the new RSP value. */
6502 if (rc == VINF_SUCCESS)
6503 *pTmpRsp = NewRsp;
6504 }
6505
6506 return rc;
6507}
6508
6509
6510/**
6511 * Pops a dword from the stack, using a temporary stack pointer.
6512 *
6513 * @returns Strict VBox status code.
6514 * @param pIemCpu The IEM per CPU data.
6515 * @param pu32Value Where to store the popped value.
6516 * @param pTmpRsp Pointer to the temporary stack pointer.
6517 */
6518static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
6519{
6520 /* Increment the stack pointer. */
6521 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6522 RTUINT64U NewRsp = *pTmpRsp;
6523 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
6524
6525 /* Read the dword the lazy way. */
6526 uint32_t const *pu32Src;
6527 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6528 if (rc == VINF_SUCCESS)
6529 {
6530 *pu32Value = *pu32Src;
6531 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6532
6533 /* Commit the new RSP value. */
6534 if (rc == VINF_SUCCESS)
6535 *pTmpRsp = NewRsp;
6536 }
6537
6538 return rc;
6539}
6540
6541
6542/**
6543 * Pops a qword from the stack, using a temporary stack pointer.
6544 *
6545 * @returns Strict VBox status code.
6546 * @param pIemCpu The IEM per CPU data.
6547 * @param pu64Value Where to store the popped value.
6548 * @param pTmpRsp Pointer to the temporary stack pointer.
6549 */
6550static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
6551{
6552 /* Increment the stack pointer. */
6553 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6554 RTUINT64U NewRsp = *pTmpRsp;
6555 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6556
6557 /* Read the qword the lazy way. */
6558 uint64_t const *pu64Src;
6559 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6560 if (rcStrict == VINF_SUCCESS)
6561 {
6562 *pu64Value = *pu64Src;
6563 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6564
6565 /* Commit the new RSP value. */
6566 if (rcStrict == VINF_SUCCESS)
6567 *pTmpRsp = NewRsp;
6568 }
6569
6570 return rcStrict;
6571}
6572
6573
6574/**
6575 * Begin a special stack push (used by interrupts, exceptions and such).
6576 *
6577 * This will raise \#SS or \#PF if appropriate.
6578 *
6579 * @returns Strict VBox status code.
6580 * @param pIemCpu The IEM per CPU data.
6581 * @param cbMem The number of bytes to push onto the stack.
6582 * @param ppvMem Where to return the pointer to the stack memory.
6583 * As with the other memory functions this could be
6584 * direct access or bounce buffered access, so
6585 * don't commit the register until the commit call
6586 * succeeds.
6587 * @param puNewRsp Where to return the new RSP value. This must be
6588 * passed unchanged to
6589 * iemMemStackPushCommitSpecial().
6590 */
6591static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6592{
6593 Assert(cbMem < UINT8_MAX);
6594 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6595 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6596 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6597}
6598
6599
6600/**
6601 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6602 *
6603 * This will update the rSP.
6604 *
6605 * @returns Strict VBox status code.
6606 * @param pIemCpu The IEM per CPU data.
6607 * @param pvMem The pointer returned by
6608 * iemMemStackPushBeginSpecial().
6609 * @param uNewRsp The new RSP value returned by
6610 * iemMemStackPushBeginSpecial().
6611 */
6612static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6613{
6614 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6615 if (rcStrict == VINF_SUCCESS)
6616 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6617 return rcStrict;
6618}
6619
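/*
 * A minimal usage sketch for the special push API above (uEip, uSelCs and
 * uEfl are hypothetical caller variables standing in for whatever frame an
 * exception dispatcher builds):
 *
 *      uint32_t *pu32Frame;
 *      uint64_t  uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                          (void **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu32Frame[2] = uEfl;
 *      pu32Frame[1] = uSelCs;
 *      pu32Frame[0] = uEip;
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
 *
 * RSP is only committed once the whole frame has been written successfully,
 * mirroring the plain push helpers further up.
 */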
6620
6621/**
6622 * Begin a special stack pop (used by iret, retf and such).
6623 *
6624 * This will raise \#SS or \#PF if appropriate.
6625 *
6626 * @returns Strict VBox status code.
6627 * @param pIemCpu The IEM per CPU data.
6628 * @param cbMem The number of bytes to pop off the stack.
6629 * @param ppvMem Where to return the pointer to the stack memory.
6630 * @param puNewRsp Where to return the new RSP value. This must be
6631 * passed unchanged to
6632 * iemMemStackPopCommitSpecial() or applied
6633 * manually if iemMemStackPopDoneSpecial() is used.
6634 */
6635static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6636{
6637 Assert(cbMem < UINT8_MAX);
6638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6639 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6640 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6641}
6642
6643
6644/**
6645 * Continue a special stack pop (used by iret and retf).
6646 *
6647 * This will raise \#SS or \#PF if appropriate.
6648 *
6649 * @returns Strict VBox status code.
6650 * @param pIemCpu The IEM per CPU data.
 6651 * @param   cbMem               The number of bytes to pop off the stack.
6652 * @param ppvMem Where to return the pointer to the stack memory.
6653 * @param puNewRsp Where to return the new RSP value. This must be
6654 * passed unchanged to
6655 * iemMemStackPopCommitSpecial() or applied
6656 * manually if iemMemStackPopDoneSpecial() is used.
6657 */
6658static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6659{
6660 Assert(cbMem < UINT8_MAX);
6661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6662 RTUINT64U NewRsp;
6663 NewRsp.u = *puNewRsp;
6664 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6665 *puNewRsp = NewRsp.u;
6666 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6667}
6668
6669
6670/**
6671 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6672 *
6673 * This will update the rSP.
6674 *
6675 * @returns Strict VBox status code.
6676 * @param pIemCpu The IEM per CPU data.
6677 * @param pvMem The pointer returned by
6678 * iemMemStackPopBeginSpecial().
6679 * @param uNewRsp The new RSP value returned by
6680 * iemMemStackPopBeginSpecial().
6681 */
6682static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6683{
6684 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6685 if (rcStrict == VINF_SUCCESS)
6686 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6687 return rcStrict;
6688}
6689
6690
6691/**
6692 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6693 * iemMemStackPopContinueSpecial).
6694 *
6695 * The caller will manually commit the rSP.
6696 *
6697 * @returns Strict VBox status code.
6698 * @param pIemCpu The IEM per CPU data.
6699 * @param pvMem The pointer returned by
6700 * iemMemStackPopBeginSpecial() or
6701 * iemMemStackPopContinueSpecial().
6702 */
6703static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6704{
6705 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6706}
6707
6708
6709/**
6710 * Fetches a system table byte.
6711 *
6712 * @returns Strict VBox status code.
6713 * @param pIemCpu The IEM per CPU data.
6714 * @param pbDst Where to return the byte.
6715 * @param iSegReg The index of the segment register to use for
6716 * this access. The base and limits are checked.
6717 * @param GCPtrMem The address of the guest memory.
6718 */
6719static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6720{
6721 /* The lazy approach for now... */
6722 uint8_t const *pbSrc;
6723 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6724 if (rc == VINF_SUCCESS)
6725 {
6726 *pbDst = *pbSrc;
6727 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
6728 }
6729 return rc;
6730}
6731
6732
6733/**
6734 * Fetches a system table word.
6735 *
6736 * @returns Strict VBox status code.
6737 * @param pIemCpu The IEM per CPU data.
6738 * @param pu16Dst Where to return the word.
6739 * @param iSegReg The index of the segment register to use for
6740 * this access. The base and limits are checked.
6741 * @param GCPtrMem The address of the guest memory.
6742 */
6743static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6744{
6745 /* The lazy approach for now... */
6746 uint16_t const *pu16Src;
6747 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6748 if (rc == VINF_SUCCESS)
6749 {
6750 *pu16Dst = *pu16Src;
6751 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
6752 }
6753 return rc;
6754}
6755
6756
6757/**
6758 * Fetches a system table dword.
6759 *
6760 * @returns Strict VBox status code.
6761 * @param pIemCpu The IEM per CPU data.
6762 * @param pu32Dst Where to return the dword.
6763 * @param iSegReg The index of the segment register to use for
6764 * this access. The base and limits are checked.
6765 * @param GCPtrMem The address of the guest memory.
6766 */
6767static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6768{
6769 /* The lazy approach for now... */
6770 uint32_t const *pu32Src;
6771 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6772 if (rc == VINF_SUCCESS)
6773 {
6774 *pu32Dst = *pu32Src;
6775 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6776 }
6777 return rc;
6778}
6779
6780
6781/**
6782 * Fetches a system table qword.
6783 *
6784 * @returns Strict VBox status code.
6785 * @param pIemCpu The IEM per CPU data.
6786 * @param pu64Dst Where to return the qword.
6787 * @param iSegReg The index of the segment register to use for
6788 * this access. The base and limits are checked.
6789 * @param GCPtrMem The address of the guest memory.
6790 */
6791static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6792{
6793 /* The lazy approach for now... */
6794 uint64_t const *pu64Src;
6795 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6796 if (rc == VINF_SUCCESS)
6797 {
6798 *pu64Dst = *pu64Src;
6799 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6800 }
6801 return rc;
6802}
6803
6804
6805/**
6806 * Fetches a descriptor table entry.
6807 *
6808 * @returns Strict VBox status code.
6809 * @param pIemCpu The IEM per CPU.
6810 * @param pDesc Where to return the descriptor table entry.
6811 * @param uSel The selector which table entry to fetch.
6812 */
6813static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6814{
6815 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6816
6817 /** @todo did the 286 require all 8 bytes to be accessible? */
6818 /*
6819 * Get the selector table base and check bounds.
6820 */
6821 RTGCPTR GCPtrBase;
6822 if (uSel & X86_SEL_LDT)
6823 {
6824 if ( !pCtx->ldtr.Attr.n.u1Present
6825 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6826 {
6827 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6828 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6829 /** @todo is this the right exception? */
6830 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6831 }
6832
6833 Assert(pCtx->ldtr.Attr.n.u1Present);
6834 GCPtrBase = pCtx->ldtr.u64Base;
6835 }
6836 else
6837 {
6838 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6839 {
6840 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6841 /** @todo is this the right exception? */
6842 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6843 }
6844 GCPtrBase = pCtx->gdtr.pGdt;
6845 }
6846
6847 /*
6848 * Read the legacy descriptor and maybe the long mode extensions if
6849 * required.
6850 */
6851 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6852 if (rcStrict == VINF_SUCCESS)
6853 {
6854 if ( !IEM_IS_LONG_MODE(pIemCpu)
6855 || pDesc->Legacy.Gen.u1DescType)
6856 pDesc->Long.au64[1] = 0;
6857 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6858 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6859 else
6860 {
6861 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6862 /** @todo is this the right exception? */
6863 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6864 }
6865 }
6866 return rcStrict;
6867}
6868
6869
6870/**
6871 * Fakes a long mode stack selector for SS = 0.
6872 *
6873 * @param pDescSs Where to return the fake stack descriptor.
6874 * @param uDpl The DPL we want.
6875 */
6876static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6877{
6878 pDescSs->Long.au64[0] = 0;
6879 pDescSs->Long.au64[1] = 0;
6880 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6881 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6882 pDescSs->Long.Gen.u2Dpl = uDpl;
6883 pDescSs->Long.Gen.u1Present = 1;
6884 pDescSs->Long.Gen.u1Long = 1;
6885}
6886
6887
6888/**
6889 * Marks the selector descriptor as accessed (only non-system descriptors).
6890 *
 6891 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6892 * will therefore skip the limit checks.
6893 *
6894 * @returns Strict VBox status code.
6895 * @param pIemCpu The IEM per CPU.
6896 * @param uSel The selector.
6897 */
6898static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6899{
6900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6901
6902 /*
6903 * Get the selector table base and calculate the entry address.
6904 */
6905 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6906 ? pCtx->ldtr.u64Base
6907 : pCtx->gdtr.pGdt;
6908 GCPtr += uSel & X86_SEL_MASK;
6909
6910 /*
6911 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6912 * ugly stuff to avoid this. This will make sure it's an atomic access
 6913 * as well as more or less remove any question about 8-bit or 32-bit accesses.
6914 */
6915 VBOXSTRICTRC rcStrict;
6916 uint32_t volatile *pu32;
6917 if ((GCPtr & 3) == 0)
6918 {
6919 /* The normal case, map the 32-bit bits around the accessed bit (40). */
6920 GCPtr += 2 + 2;
6921 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6922 if (rcStrict != VINF_SUCCESS)
6923 return rcStrict;
 6924        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6925 }
6926 else
6927 {
6928 /* The misaligned GDT/LDT case, map the whole thing. */
6929 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6930 if (rcStrict != VINF_SUCCESS)
6931 return rcStrict;
6932 switch ((uintptr_t)pu32 & 3)
6933 {
6934 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6935 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6936 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6937 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6938 }
6939 }
6940
6941 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6942}
6943
6944/** @} */
6945
6946
6947/*
6948 * Include the C/C++ implementation of instruction.
6949 */
6950#include "IEMAllCImpl.cpp.h"
6951
6952
6953
6954/** @name "Microcode" macros.
6955 *
 6956 * The idea is that we should be able to use the same code to interpret
 6957 * instructions as well as to recompile them.  Thus this obfuscation.
6958 *
6959 * @{
6960 */
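/*
 * Illustrative sketch only (not compiled): a decoder body for a hypothetical
 * 32-bit register-to-register "add"-style form could be composed from these
 * macros roughly like this.  The worker name iemAImpl_add_u32 is assumed from
 * the usual IEM naming convention, and iGRegDst / iGRegSrc stand for locals
 * holding the decoded register indexes; neither is defined in this file.
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,   u32Src,  1);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *      IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */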
6961#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6962#define IEM_MC_END() }
6963#define IEM_MC_PAUSE() do {} while (0)
6964#define IEM_MC_CONTINUE() do {} while (0)
6965
6966/** Internal macro. */
6967#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6968 do \
6969 { \
6970 VBOXSTRICTRC rcStrict2 = a_Expr; \
6971 if (rcStrict2 != VINF_SUCCESS) \
6972 return rcStrict2; \
6973 } while (0)
6974
6975#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6976#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6977#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6978#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6979#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6980#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6981#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6982
6983#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6984#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6985 do { \
6986 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6987 return iemRaiseDeviceNotAvailable(pIemCpu); \
6988 } while (0)
6989#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6990 do { \
6991 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6992 return iemRaiseMathFault(pIemCpu); \
6993 } while (0)
6994#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
6995 do { \
6996 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
6997 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
6998 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
6999 return iemRaiseUndefinedOpcode(pIemCpu); \
7000 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7001 return iemRaiseDeviceNotAvailable(pIemCpu); \
7002 } while (0)
7003#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
7004 do { \
7005 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7006 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
7007 return iemRaiseUndefinedOpcode(pIemCpu); \
7008 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7009 return iemRaiseDeviceNotAvailable(pIemCpu); \
7010 } while (0)
7011#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
7012 do { \
7013 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7014 || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
7015 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
7016 return iemRaiseUndefinedOpcode(pIemCpu); \
7017 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7018 return iemRaiseDeviceNotAvailable(pIemCpu); \
7019 } while (0)
7020#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
7021 do { \
7022 if (pIemCpu->uCpl != 0) \
7023 return iemRaiseGeneralProtectionFault0(pIemCpu); \
7024 } while (0)
7025
7026
7027#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
7028#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
7029#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
7030#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
7031#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
7032#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
7033#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
7034 uint32_t a_Name; \
7035 uint32_t *a_pName = &a_Name
7036#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
7037 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
7038
7039#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
7040#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
7041
7042#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7043#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7044#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7045#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7046#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7047#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7048#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7049#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7050#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7051#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7052#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
7053#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
7054#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
7055#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
7056#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
7057#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
7058#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
7059#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7060#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7061#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7062#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
7063#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
7064#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
7065#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7066#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7067#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7068#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7069#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7070#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7071/** @note Not for IOPL or IF testing or modification. */
7072#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7073#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7074#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
7075#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
7076
7077#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
7078#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
7079#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
7080#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
7081#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
7082#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
7083#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
7084#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
7085#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
7086#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
7087#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
7088 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
7089
7090#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
7091#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
7092/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
7093 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
7094#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
7095#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
7096/** @note Not for IOPL or IF testing or modification. */
7097#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7098
7099#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
7100#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
7101#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
7102 do { \
7103 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7104 *pu32Reg += (a_u32Value); \
 7105        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
7106 } while (0)
7107#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
7108
7109#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
7110#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
7111#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
7112 do { \
7113 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7114 *pu32Reg -= (a_u32Value); \
 7115        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
7116 } while (0)
7117#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
7118
7119#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
7120#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
7121#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
7122#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
7123#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
7124#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
7125#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
7126
7127#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
7128#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
7129#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
7130#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
7131
7132#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
7133#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
7134#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
7135
7136#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
7137#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
7138
7139#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
7140#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
7141#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
7142
7143#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
7144#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
7145#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
7146
7147#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
7148
7149#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
7150
7151#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
7152#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
7153#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
7154 do { \
7155 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7156 *pu32Reg &= (a_u32Value); \
 7157        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
7158 } while (0)
7159#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
7160
7161#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
7162#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
7163#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
7164 do { \
7165 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7166 *pu32Reg |= (a_u32Value); \
 7167        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
7168 } while (0)
7169#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
7170
7171
7172/** @note Not for IOPL or IF modification. */
7173#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
7174/** @note Not for IOPL or IF modification. */
7175#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
7176/** @note Not for IOPL or IF modification. */
7177#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
7178
7179#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
7180
7181
7182#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
7183 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
7184#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
7185 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
7186#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
7187 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
7188#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
7189 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
7190#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
7191 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7192#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
7193 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7194#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
7195 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7196
7197#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
7198 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
7199#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
7200 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
7201#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
7202 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
7203#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
7204 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
7205#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
7206 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
7207 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7208 } while (0)
7209#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
7210 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
7211 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7212 } while (0)
7213#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
7214 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7215#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
7216 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7217#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
7218 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
7219
7220#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
7221 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
7222#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
7223 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
7224#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
7225 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
7226
7227#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7228 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
7229#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7230 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7231#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
7232 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
7233
7234#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7235 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
7236#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7237 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7238#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
7239 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
7240
7241#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7242 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7243
7244#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7245 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7246#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7247 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7248#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7249 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7250
7251#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
7252 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
7253#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
7254 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
7255#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
7256 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
7257
7258#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7259 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7260#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
7261 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7262
7263
7264
7265#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7266 do { \
7267 uint8_t u8Tmp; \
7268 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7269 (a_u16Dst) = u8Tmp; \
7270 } while (0)
7271#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7272 do { \
7273 uint8_t u8Tmp; \
7274 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7275 (a_u32Dst) = u8Tmp; \
7276 } while (0)
7277#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7278 do { \
7279 uint8_t u8Tmp; \
7280 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7281 (a_u64Dst) = u8Tmp; \
7282 } while (0)
7283#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7284 do { \
7285 uint16_t u16Tmp; \
7286 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7287 (a_u32Dst) = u16Tmp; \
7288 } while (0)
7289#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7290 do { \
7291 uint16_t u16Tmp; \
7292 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7293 (a_u64Dst) = u16Tmp; \
7294 } while (0)
7295#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7296 do { \
7297 uint32_t u32Tmp; \
7298 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7299 (a_u64Dst) = u32Tmp; \
7300 } while (0)
7301
7302#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7303 do { \
7304 uint8_t u8Tmp; \
7305 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7306 (a_u16Dst) = (int8_t)u8Tmp; \
7307 } while (0)
7308#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7309 do { \
7310 uint8_t u8Tmp; \
7311 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7312 (a_u32Dst) = (int8_t)u8Tmp; \
7313 } while (0)
7314#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7315 do { \
7316 uint8_t u8Tmp; \
7317 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7318 (a_u64Dst) = (int8_t)u8Tmp; \
7319 } while (0)
7320#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7321 do { \
7322 uint16_t u16Tmp; \
7323 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7324 (a_u32Dst) = (int16_t)u16Tmp; \
7325 } while (0)
7326#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7327 do { \
7328 uint16_t u16Tmp; \
7329 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7330 (a_u64Dst) = (int16_t)u16Tmp; \
7331 } while (0)
7332#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7333 do { \
7334 uint32_t u32Tmp; \
7335 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7336 (a_u64Dst) = (int32_t)u32Tmp; \
7337 } while (0)
7338
7339#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
7340 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
7341#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
7342 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
7343#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
7344 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
7345#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
7346 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
7347
7348#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
7349 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
7350#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
7351 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
7352#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
7353 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
7354#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
7355 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
7356
7357#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
7358#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
7359#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
7360#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
7361#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
7362#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
7363#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
7364 do { \
7365 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
7366 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
7367 } while (0)
7368
7369#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
7370 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7371#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
7372 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7373
7374
7375#define IEM_MC_PUSH_U16(a_u16Value) \
7376 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
7377#define IEM_MC_PUSH_U32(a_u32Value) \
7378 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
7379#define IEM_MC_PUSH_U64(a_u64Value) \
7380 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
7381
7382#define IEM_MC_POP_U16(a_pu16Value) \
7383 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
7384#define IEM_MC_POP_U32(a_pu32Value) \
7385 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
7386#define IEM_MC_POP_U64(a_pu64Value) \
7387 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
7388
7389/** Maps guest memory for direct or bounce buffered access.
7390 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7391 * @remarks May return.
7392 */
7393#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
7394 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7395
7396/** Maps guest memory for direct or bounce buffered access.
7397 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7398 * @remarks May return.
7399 */
7400#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
7401 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7402
7403/** Commits the memory and unmaps the guest memory.
7404 * @remarks May return.
7405 */
7406#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
7407 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
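/*
 * Illustrative sketch only (not compiled): a read-modify-write memory operand
 * would typically pair IEM_MC_MEM_MAP with IEM_MC_MEM_COMMIT_AND_UNMAP like
 * this.  GCPtrEffDst is a hypothetical effective address local, and the access
 * type IEM_ACCESS_DATA_RW plus pIemCpu->iEffSeg are assumed from the usual IEM
 * conventions.
 *
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      // ... let the arithmetic worker update *pu16Dst and the flags ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */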
7408
 7409/** Commits the memory and unmaps the guest memory unless the FPU status word
 7410 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
 7411 * that would prevent the store from taking place.
7412 *
7413 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
7414 * store, while \#P will not.
7415 *
7416 * @remarks May in theory return - for now.
7417 */
7418#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
7419 do { \
7420 if ( !(a_u16FSW & X86_FSW_ES) \
7421 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
7422 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
7423 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
7424 } while (0)
7425
7426/** Calculate efficient address from R/M. */
7427#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
7428 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
7429
7430#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
7431#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
7432#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
7433#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
7434#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
7435#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
7436#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
7437
7438/**
7439 * Defers the rest of the instruction emulation to a C implementation routine
7440 * and returns, only taking the standard parameters.
7441 *
7442 * @param a_pfnCImpl The pointer to the C routine.
7443 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7444 */
7445#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7446
7447/**
 7448 * Defers the rest of the instruction emulation to a C implementation routine and
7449 * returns, taking one argument in addition to the standard ones.
7450 *
7451 * @param a_pfnCImpl The pointer to the C routine.
7452 * @param a0 The argument.
7453 */
7454#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7455
7456/**
7457 * Defers the rest of the instruction emulation to a C implementation routine
7458 * and returns, taking two arguments in addition to the standard ones.
7459 *
7460 * @param a_pfnCImpl The pointer to the C routine.
7461 * @param a0 The first extra argument.
7462 * @param a1 The second extra argument.
7463 */
7464#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7465
7466/**
7467 * Defers the rest of the instruction emulation to a C implementation routine
 7468 * and returns, taking three arguments in addition to the standard ones.
7469 *
7470 * @param a_pfnCImpl The pointer to the C routine.
7471 * @param a0 The first extra argument.
7472 * @param a1 The second extra argument.
7473 * @param a2 The third extra argument.
7474 */
7475#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7476
7477/**
7478 * Defers the rest of the instruction emulation to a C implementation routine
 7479 * and returns, taking five arguments in addition to the standard ones.
7480 *
7481 * @param a_pfnCImpl The pointer to the C routine.
7482 * @param a0 The first extra argument.
7483 * @param a1 The second extra argument.
7484 * @param a2 The third extra argument.
7485 * @param a3 The fourth extra argument.
7486 * @param a4 The fifth extra argument.
7487 */
7488#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
7489
7490/**
7491 * Defers the entire instruction emulation to a C implementation routine and
7492 * returns, only taking the standard parameters.
7493 *
 7494 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7495 *
7496 * @param a_pfnCImpl The pointer to the C routine.
7497 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7498 */
7499#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7500
7501/**
7502 * Defers the entire instruction emulation to a C implementation routine and
7503 * returns, taking one argument in addition to the standard ones.
7504 *
 7505 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7506 *
7507 * @param a_pfnCImpl The pointer to the C routine.
7508 * @param a0 The argument.
7509 */
7510#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7511
7512/**
7513 * Defers the entire instruction emulation to a C implementation routine and
7514 * returns, taking two arguments in addition to the standard ones.
7515 *
 7516 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7517 *
7518 * @param a_pfnCImpl The pointer to the C routine.
7519 * @param a0 The first extra argument.
7520 * @param a1 The second extra argument.
7521 */
7522#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7523
7524/**
7525 * Defers the entire instruction emulation to a C implementation routine and
7526 * returns, taking three arguments in addition to the standard ones.
7527 *
 7528 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7529 *
7530 * @param a_pfnCImpl The pointer to the C routine.
7531 * @param a0 The first extra argument.
7532 * @param a1 The second extra argument.
7533 * @param a2 The third extra argument.
7534 */
7535#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
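/*
 * Illustrative sketch only (not compiled): a decoder function either finishes
 * an IEM_MC_BEGIN/IEM_MC_END block with one of the IEM_MC_CALL_CIMPL_* macros,
 * or hands the whole instruction over with IEM_MC_DEFER_TO_CIMPL_*.  The
 * routine name iemCImpl_example and the argument u8Imm are made up for the
 * example.
 *
 *      // simple case, no operand decoding needed:
 *      return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_example, u8Imm);
 *
 *      // inside a microcode block, after the operands have been decoded:
 *      IEM_MC_BEGIN(1, 0);
 *      IEM_MC_ARG_CONST(uint8_t, u8Imm, 0, 0);
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_example, u8Imm);
 *      IEM_MC_END();
 */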
7536
7537/**
7538 * Calls a FPU assembly implementation taking one visible argument.
7539 *
7540 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7541 * @param a0 The first extra argument.
7542 */
7543#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
7544 do { \
7545 iemFpuPrepareUsage(pIemCpu); \
7546 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
7547 } while (0)
7548
7549/**
7550 * Calls a FPU assembly implementation taking two visible arguments.
7551 *
7552 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7553 * @param a0 The first extra argument.
7554 * @param a1 The second extra argument.
7555 */
7556#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
7557 do { \
7558 iemFpuPrepareUsage(pIemCpu); \
7559 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7560 } while (0)
7561
7562/**
7563 * Calls a FPU assembly implementation taking three visible arguments.
7564 *
7565 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7566 * @param a0 The first extra argument.
7567 * @param a1 The second extra argument.
7568 * @param a2 The third extra argument.
7569 */
7570#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7571 do { \
7572 iemFpuPrepareUsage(pIemCpu); \
7573 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7574 } while (0)
7575
7576#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
7577 do { \
7578 (a_FpuData).FSW = (a_FSW); \
7579 (a_FpuData).r80Result = *(a_pr80Value); \
7580 } while (0)
7581
7582/** Pushes FPU result onto the stack. */
7583#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
7584 iemFpuPushResult(pIemCpu, &a_FpuData)
7585/** Pushes FPU result onto the stack and sets the FPUDP. */
7586#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
7587 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
7588
 7589/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
7590#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
7591 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
7592
7593/** Stores FPU result in a stack register. */
7594#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
7595 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
7596/** Stores FPU result in a stack register and pops the stack. */
7597#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
7598 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
7599/** Stores FPU result in a stack register and sets the FPUDP. */
7600#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7601 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7602/** Stores FPU result in a stack register, sets the FPUDP, and pops the
7603 * stack. */
7604#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7605 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
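/*
 * Illustrative sketch only (not compiled): an FPU arithmetic instruction would
 * typically prepare the FPU, call the assembly worker into an IEMFPURESULT
 * local, and then store the result through one of the macros above.  The
 * worker name iemAImpl_fadd_r80_by_r80 is assumed from the usual IEM naming
 * convention, and pr80Dst / pr80Src stand for hypothetical register references
 * obtained via the FPUREG test macros further down.
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, &FpuRes, pr80Dst, pr80Src);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_USED_FPU();
 */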
7606
7607/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
7608#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
7609 iemFpuUpdateOpcodeAndIp(pIemCpu)
7610/** Free a stack register (for FFREE and FFREEP). */
7611#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
7612 iemFpuStackFree(pIemCpu, a_iStReg)
7613/** Increment the FPU stack pointer. */
7614#define IEM_MC_FPU_STACK_INC_TOP() \
7615 iemFpuStackIncTop(pIemCpu)
7616/** Decrement the FPU stack pointer. */
7617#define IEM_MC_FPU_STACK_DEC_TOP() \
7618 iemFpuStackDecTop(pIemCpu)
7619
7620/** Updates the FSW, FOP, FPUIP, and FPUCS. */
7621#define IEM_MC_UPDATE_FSW(a_u16FSW) \
7622 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7623/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
7624#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
7625 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7626/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
7627#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7628 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7629/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
7630#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
7631 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
7632/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
7633 * stack. */
7634#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7635 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7636/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
7637#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
 7638    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
7639
7640/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
7641#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
7642 iemFpuStackUnderflow(pIemCpu, a_iStDst)
7643/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7644 * stack. */
7645#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
7646 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
7647/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7648 * FPUDS. */
7649#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7650 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7651/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7652 * FPUDS. Pops stack. */
7653#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7654 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7655/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7656 * stack twice. */
7657#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
7658 iemFpuStackUnderflowThenPopPop(pIemCpu)
7659/** Raises a FPU stack underflow exception for an instruction pushing a result
7660 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
7661#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
7662 iemFpuStackPushUnderflow(pIemCpu)
7663/** Raises a FPU stack underflow exception for an instruction pushing a result
7664 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
7665#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
7666 iemFpuStackPushUnderflowTwo(pIemCpu)
7667
7668/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7669 * FPUIP, FPUCS and FOP. */
7670#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
7671 iemFpuStackPushOverflow(pIemCpu)
7672/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7673 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
7674#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
7675 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
7676/** Indicates that we (might) have modified the FPU state. */
7677#define IEM_MC_USED_FPU() \
7678 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
7679
7680/**
7681 * Calls a MMX assembly implementation taking two visible arguments.
7682 *
7683 * @param a_pfnAImpl Pointer to the assembly MMX routine.
7684 * @param a0 The first extra argument.
7685 * @param a1 The second extra argument.
7686 */
7687#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
7688 do { \
7689 iemFpuPrepareUsage(pIemCpu); \
7690 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7691 } while (0)
7692
7693/**
7694 * Calls a MMX assembly implementation taking three visible arguments.
7695 *
7696 * @param a_pfnAImpl Pointer to the assembly MMX routine.
7697 * @param a0 The first extra argument.
7698 * @param a1 The second extra argument.
7699 * @param a2 The third extra argument.
7700 */
7701#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7702 do { \
7703 iemFpuPrepareUsage(pIemCpu); \
7704 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7705 } while (0)
7706
7707
7708/**
7709 * Calls a SSE assembly implementation taking two visible arguments.
7710 *
 7711 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
7712 * @param a0 The first extra argument.
7713 * @param a1 The second extra argument.
7714 */
7715#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
7716 do { \
7717 iemFpuPrepareUsageSse(pIemCpu); \
7718 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7719 } while (0)
7720
7721/**
7722 * Calls a SSE assembly implementation taking three visible arguments.
7723 *
 7724 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
7725 * @param a0 The first extra argument.
7726 * @param a1 The second extra argument.
7727 * @param a2 The third extra argument.
7728 */
7729#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7730 do { \
7731 iemFpuPrepareUsageSse(pIemCpu); \
7732 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7733 } while (0)
7734
7735
7736/** @note Not for IOPL or IF testing. */
7737#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
7738/** @note Not for IOPL or IF testing. */
7739#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
7740/** @note Not for IOPL or IF testing. */
7741#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
7742/** @note Not for IOPL or IF testing. */
7743#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
7744/** @note Not for IOPL or IF testing. */
7745#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
7746 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7747 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7748/** @note Not for IOPL or IF testing. */
7749#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7750 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7751 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7752/** @note Not for IOPL or IF testing. */
7753#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7754 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7755 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7756 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7757/** @note Not for IOPL or IF testing. */
7758#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7759 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7760 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7761 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7762#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7763#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7764#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7765/** @note Not for IOPL or IF testing. */
7766#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7767 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7768 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7769/** @note Not for IOPL or IF testing. */
7770#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7771 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7772 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7773/** @note Not for IOPL or IF testing. */
7774#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7775 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7776 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7777/** @note Not for IOPL or IF testing. */
7778#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7779 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7780 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7781/** @note Not for IOPL or IF testing. */
7782#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7783 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7784 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7785/** @note Not for IOPL or IF testing. */
7786#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7787 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7788 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7789#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7790#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7791#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7792 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7793#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7794 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7795#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7796 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7797#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7798 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7799#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7800 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7801#define IEM_MC_IF_FCW_IM() \
7802 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7803
7804#define IEM_MC_ELSE() } else {
7805#define IEM_MC_ENDIF() } do {} while (0)
7806
7807/** @} */
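/* Editor's illustrative sketch, not part of the original source: the
 * IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros above expand to plain C
 * braces, so a microcode block in an instruction implementation reads like
 * structured code.  A hypothetical EFLAGS test could look as follows; the
 * IEM_MC_STORE_GREG_U16 name is assumed from its use elsewhere in IEM and is
 * not defined in this excerpt. */
#if 0
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, 1);   /* if (eflags.u & ZF) { ... }  */
        IEM_MC_ELSE()
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, 0);   /* } else { ... }              */
        IEM_MC_ENDIF();                               /* } do {} while (0);          */
#endif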
7808
7809
7810/** @name Opcode Debug Helpers.
7811 * @{
7812 */
7813#ifdef DEBUG
7814# define IEMOP_MNEMONIC(a_szMnemonic) \
7815 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7816 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7817# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7818 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7819 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7820#else
7821# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7822# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7823#endif
7824
7825/** @} */
7826
7827
7828/** @name Opcode Helpers.
7829 * @{
7830 */
7831
7832/** The instruction raises an \#UD in real and V8086 mode. */
7833#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7834 do \
7835 { \
7836 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7837 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7838 } while (0)
7839
7840/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
7841 * lock prefixed.
7842 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7843#define IEMOP_HLP_NO_LOCK_PREFIX() \
7844 do \
7845 { \
7846 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7847 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7848 } while (0)
7849
7850/** The instruction is not available in 64-bit mode; raises \#UD if we're in
7851 * 64-bit mode. */
7852#define IEMOP_HLP_NO_64BIT() \
7853 do \
7854 { \
7855 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7856 return IEMOP_RAISE_INVALID_OPCODE(); \
7857 } while (0)
7858
7859/** The instruction is only available in 64-bit mode; raises \#UD if we're not in
7860 * 64-bit mode. */
7861#define IEMOP_HLP_ONLY_64BIT() \
7862 do \
7863 { \
7864 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
7865 return IEMOP_RAISE_INVALID_OPCODE(); \
7866 } while (0)
7867
7868/** The instruction defaults to 64-bit operand size if 64-bit mode. */
7869#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7870 do \
7871 { \
7872 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7873 iemRecalEffOpSize64Default(pIemCpu); \
7874 } while (0)
7875
7876/** The instruction has 64-bit operand size if 64-bit mode. */
7877#define IEMOP_HLP_64BIT_OP_SIZE() \
7878 do \
7879 { \
7880 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7881 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7882 } while (0)
7883
7884/** Only a REX prefix immediately preceding the first opcode byte takes
7885 * effect. This macro helps ensure this as well as logging bad guest code. */
7886#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
7887 do \
7888 { \
7889 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
7890 { \
7891 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
7892 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
7893 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
7894 pIemCpu->uRexB = 0; \
7895 pIemCpu->uRexIndex = 0; \
7896 pIemCpu->uRexReg = 0; \
7897 iemRecalEffOpSize(pIemCpu); \
7898 } \
7899 } while (0)
7900
7901/**
7902 * Done decoding.
7903 */
7904#define IEMOP_HLP_DONE_DECODING() \
7905 do \
7906 { \
7907 /*nothing for now, maybe later... */ \
7908 } while (0)
7909
7910/**
7911 * Done decoding, raise \#UD exception if lock prefix present.
7912 */
7913#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7914 do \
7915 { \
7916 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7917 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7918 } while (0)
7919#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
7920 do \
7921 { \
7922 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7923 { \
7924 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
7925 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7926 } \
7927 } while (0)
7928#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
7929 do \
7930 { \
7931 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7932 { \
7933 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
7934 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7935 } \
7936 } while (0)
7937
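/* Editor's illustrative sketch, not part of the original source: a typical
 * opcode decoder function strings the helpers above together - log the
 * mnemonic, reject encodings that are invalid in the current mode, then
 * declare decoding done before touching any guest state.  FNIEMOP_DEF is
 * assumed from its use elsewhere in IEM; the body below is a stub. */
#if 0
FNIEMOP_DEF(iemOp_Example_Stub)
{
    IEMOP_MNEMONIC("example");                  /* Log4 decode line (debug builds). */
    IEMOP_HLP_NO_64BIT();                       /* #UD when executed in 64-bit mode. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();   /* #UD when lock prefixed. */
    return IEMOP_RAISE_INVALID_OPCODE();        /* Real implementation omitted in this sketch. */
}
#endif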
7938
7939/**
7940 * Calculates the effective address of a ModR/M memory operand.
7941 *
7942 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7943 *
7944 * @return Strict VBox status code.
7945 * @param pIemCpu The IEM per CPU data.
7946 * @param bRm The ModRM byte.
7947 * @param cbImm The size of any immediate following the
7948 * effective address opcode bytes. Important for
7949 * RIP relative addressing.
7950 * @param pGCPtrEff Where to return the effective address.
7951 */
7952static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
7953{
7954 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7955 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7956#define SET_SS_DEF() \
7957 do \
7958 { \
7959 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7960 pIemCpu->iEffSeg = X86_SREG_SS; \
7961 } while (0)
7962
7963 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
7964 {
7965/** @todo Check the effective address size crap! */
7966 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
7967 {
7968 uint16_t u16EffAddr;
7969
7970 /* Handle the disp16 form with no registers first. */
7971 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7972 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7973 else
7974 {
7975                /* Get the displacement. */
7976 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7977 {
7978 case 0: u16EffAddr = 0; break;
7979 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7980 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7981 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7982 }
7983
7984 /* Add the base and index registers to the disp. */
7985 switch (bRm & X86_MODRM_RM_MASK)
7986 {
7987 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7988 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7989 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7990 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7991 case 4: u16EffAddr += pCtx->si; break;
7992 case 5: u16EffAddr += pCtx->di; break;
7993 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7994 case 7: u16EffAddr += pCtx->bx; break;
7995 }
7996 }
7997
7998 *pGCPtrEff = u16EffAddr;
7999 }
8000 else
8001 {
8002 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8003 uint32_t u32EffAddr;
8004
8005 /* Handle the disp32 form with no registers first. */
8006 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8007 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8008 else
8009 {
8010 /* Get the register (or SIB) value. */
8011 switch ((bRm & X86_MODRM_RM_MASK))
8012 {
8013 case 0: u32EffAddr = pCtx->eax; break;
8014 case 1: u32EffAddr = pCtx->ecx; break;
8015 case 2: u32EffAddr = pCtx->edx; break;
8016 case 3: u32EffAddr = pCtx->ebx; break;
8017 case 4: /* SIB */
8018 {
8019 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8020
8021 /* Get the index and scale it. */
8022 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8023 {
8024 case 0: u32EffAddr = pCtx->eax; break;
8025 case 1: u32EffAddr = pCtx->ecx; break;
8026 case 2: u32EffAddr = pCtx->edx; break;
8027 case 3: u32EffAddr = pCtx->ebx; break;
8028 case 4: u32EffAddr = 0; /*none */ break;
8029 case 5: u32EffAddr = pCtx->ebp; break;
8030 case 6: u32EffAddr = pCtx->esi; break;
8031 case 7: u32EffAddr = pCtx->edi; break;
8032 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8033 }
8034 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8035
8036 /* add base */
8037 switch (bSib & X86_SIB_BASE_MASK)
8038 {
8039 case 0: u32EffAddr += pCtx->eax; break;
8040 case 1: u32EffAddr += pCtx->ecx; break;
8041 case 2: u32EffAddr += pCtx->edx; break;
8042 case 3: u32EffAddr += pCtx->ebx; break;
8043 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
8044 case 5:
8045 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8046 {
8047 u32EffAddr += pCtx->ebp;
8048 SET_SS_DEF();
8049 }
8050 else
8051 {
8052 uint32_t u32Disp;
8053 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8054 u32EffAddr += u32Disp;
8055 }
8056 break;
8057 case 6: u32EffAddr += pCtx->esi; break;
8058 case 7: u32EffAddr += pCtx->edi; break;
8059 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8060 }
8061 break;
8062 }
8063 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
8064 case 6: u32EffAddr = pCtx->esi; break;
8065 case 7: u32EffAddr = pCtx->edi; break;
8066 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8067 }
8068
8069 /* Get and add the displacement. */
8070 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8071 {
8072 case 0:
8073 break;
8074 case 1:
8075 {
8076 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8077 u32EffAddr += i8Disp;
8078 break;
8079 }
8080 case 2:
8081 {
8082 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8083 u32EffAddr += u32Disp;
8084 break;
8085 }
8086 default:
8087 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
8088 }
8089
8090 }
8091 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
8092 *pGCPtrEff = u32EffAddr;
8093 else
8094 {
8095 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
8096 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8097 }
8098 }
8099 }
8100 else
8101 {
8102 uint64_t u64EffAddr;
8103
8104 /* Handle the rip+disp32 form with no registers first. */
8105 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8106 {
8107 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8108 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
8109 }
8110 else
8111 {
8112 /* Get the register (or SIB) value. */
8113 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
8114 {
8115 case 0: u64EffAddr = pCtx->rax; break;
8116 case 1: u64EffAddr = pCtx->rcx; break;
8117 case 2: u64EffAddr = pCtx->rdx; break;
8118 case 3: u64EffAddr = pCtx->rbx; break;
8119 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
8120 case 6: u64EffAddr = pCtx->rsi; break;
8121 case 7: u64EffAddr = pCtx->rdi; break;
8122 case 8: u64EffAddr = pCtx->r8; break;
8123 case 9: u64EffAddr = pCtx->r9; break;
8124 case 10: u64EffAddr = pCtx->r10; break;
8125 case 11: u64EffAddr = pCtx->r11; break;
8126 case 13: u64EffAddr = pCtx->r13; break;
8127 case 14: u64EffAddr = pCtx->r14; break;
8128 case 15: u64EffAddr = pCtx->r15; break;
8129 /* SIB */
8130 case 4:
8131 case 12:
8132 {
8133 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8134
8135 /* Get the index and scale it. */
8136 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
8137 {
8138 case 0: u64EffAddr = pCtx->rax; break;
8139 case 1: u64EffAddr = pCtx->rcx; break;
8140 case 2: u64EffAddr = pCtx->rdx; break;
8141 case 3: u64EffAddr = pCtx->rbx; break;
8142 case 4: u64EffAddr = 0; /*none */ break;
8143 case 5: u64EffAddr = pCtx->rbp; break;
8144 case 6: u64EffAddr = pCtx->rsi; break;
8145 case 7: u64EffAddr = pCtx->rdi; break;
8146 case 8: u64EffAddr = pCtx->r8; break;
8147 case 9: u64EffAddr = pCtx->r9; break;
8148 case 10: u64EffAddr = pCtx->r10; break;
8149 case 11: u64EffAddr = pCtx->r11; break;
8150 case 12: u64EffAddr = pCtx->r12; break;
8151 case 13: u64EffAddr = pCtx->r13; break;
8152 case 14: u64EffAddr = pCtx->r14; break;
8153 case 15: u64EffAddr = pCtx->r15; break;
8154 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8155 }
8156 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8157
8158 /* add base */
8159 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
8160 {
8161 case 0: u64EffAddr += pCtx->rax; break;
8162 case 1: u64EffAddr += pCtx->rcx; break;
8163 case 2: u64EffAddr += pCtx->rdx; break;
8164 case 3: u64EffAddr += pCtx->rbx; break;
8165 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
8166 case 6: u64EffAddr += pCtx->rsi; break;
8167 case 7: u64EffAddr += pCtx->rdi; break;
8168 case 8: u64EffAddr += pCtx->r8; break;
8169 case 9: u64EffAddr += pCtx->r9; break;
8170 case 10: u64EffAddr += pCtx->r10; break;
8171 case 11: u64EffAddr += pCtx->r11; break;
8172 case 12: u64EffAddr += pCtx->r12; break;
8173 case 14: u64EffAddr += pCtx->r14; break;
8174 case 15: u64EffAddr += pCtx->r15; break;
8175 /* complicated encodings */
8176 case 5:
8177 case 13:
8178 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8179 {
8180 if (!pIemCpu->uRexB)
8181 {
8182 u64EffAddr += pCtx->rbp;
8183 SET_SS_DEF();
8184 }
8185 else
8186 u64EffAddr += pCtx->r13;
8187 }
8188 else
8189 {
8190 uint32_t u32Disp;
8191 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8192 u64EffAddr += (int32_t)u32Disp;
8193 }
8194 break;
8195 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8196 }
8197 break;
8198 }
8199 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8200 }
8201
8202 /* Get and add the displacement. */
8203 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8204 {
8205 case 0:
8206 break;
8207 case 1:
8208 {
8209 int8_t i8Disp;
8210 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8211 u64EffAddr += i8Disp;
8212 break;
8213 }
8214 case 2:
8215 {
8216 uint32_t u32Disp;
8217 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8218 u64EffAddr += (int32_t)u32Disp;
8219 break;
8220 }
8221 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8222 }
8223
8224 }
8225
8226 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
8227 *pGCPtrEff = u64EffAddr;
8228 else
8229 {
8230 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8231 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8232 }
8233 }
8234
8235 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8236 return VINF_SUCCESS;
8237}
8238
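/* Editor's worked example, not part of the original source: for the 16-bit
 * path above, bRm = 0x42 decodes as mod=1, r/m=2, so the effective address is
 * bp + si + disp8 (sign-extended to 16 bits) and SET_SS_DEF() makes SS the
 * default segment; bRm = 0x06 (mod=0, r/m=6) is the special disp16-only form
 * handled first. */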
8239/** @} */
8240
8241
8242
8243/*
8244 * Include the instructions
8245 */
8246#include "IEMAllInstructions.cpp.h"
8247
8248
8249
8250
8251#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8252
8253/**
8254 * Sets up execution verification mode.
8255 */
8256static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
8257{
8258 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8259 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
8260
8261 /*
8262 * Always note down the address of the current instruction.
8263 */
8264 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
8265 pIemCpu->uOldRip = pOrgCtx->rip;
8266
8267 /*
8268 * Enable verification and/or logging.
8269 */
8270 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
8271 if ( pIemCpu->fNoRem
8272 && ( 0
8273#if 0 /* auto enable on first paged protected mode interrupt */
8274 || ( pOrgCtx->eflags.Bits.u1IF
8275 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
8276 && TRPMHasTrap(pVCpu)
8277 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
8278#endif
8279#if 0
8280             || ( pOrgCtx->cs.Sel == 0x10
8281                  && (   pOrgCtx->rip == 0x90119e3e
8282                      || pOrgCtx->rip == 0x901d9810))
8283#endif
8284#if 0 /* Auto enable DSL - FPU stuff. */
8285             || ( pOrgCtx->cs.Sel == 0x10
8286 && (// pOrgCtx->rip == 0xc02ec07f
8287 //|| pOrgCtx->rip == 0xc02ec082
8288 //|| pOrgCtx->rip == 0xc02ec0c9
8289 0
8290 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
8291#endif
8292#if 0 /* Auto enable DSL - fstp st0 stuff. */
8293             || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
8294#endif
8295#if 0
8296 || pOrgCtx->rip == 0x9022bb3a
8297#endif
8298#if 0
8299 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
8300#endif
8301#if 0
8302 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
8303 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
8304#endif
8305#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
8306 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
8307 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
8308 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
8309#endif
8310#if 0 /* NT4SP1 - xadd early boot. */
8311 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
8312#endif
8313#if 0 /* NT4SP1 - wrmsr (intel MSR). */
8314 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
8315#endif
8316#if 0 /* NT4SP1 - cmpxchg (AMD). */
8317 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
8318#endif
8319#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
8320 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
8321#endif
8322#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
8323 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
8324
8325#endif
8326#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
8327 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
8328
8329#endif
8330#if 0 /* NT4SP1 - frstor [ecx] */
8331 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
8332#endif
8333#if 0 /* xxxxxx - All long mode code. */
8334 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
8335#endif
8336#if 0 /* rep movsq linux 3.7 64-bit boot. */
8337 || (pOrgCtx->rip == 0x0000000000100241)
8338#endif
8339#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
8340 || (pOrgCtx->rip == 0x000000000215e240)
8341#endif
8342 )
8343 )
8344 {
8345 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
8346 RTLogFlags(NULL, "enabled");
8347 pIemCpu->fNoRem = false;
8348 }
8349
8350 /*
8351 * Switch state.
8352 */
8353 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8354 {
8355 static CPUMCTX s_DebugCtx; /* Ugly! */
8356
8357 s_DebugCtx = *pOrgCtx;
8358 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
8359 }
8360
8361 /*
8362 * See if there is an interrupt pending in TRPM and inject it if we can.
8363 */
8364 pIemCpu->uInjectCpl = UINT8_MAX;
8365 if ( pOrgCtx->eflags.Bits.u1IF
8366 && TRPMHasTrap(pVCpu)
8367 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
8368 {
8369 uint8_t u8TrapNo;
8370 TRPMEVENT enmType;
8371 RTGCUINT uErrCode;
8372 RTGCPTR uCr2;
8373 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
8374 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
8375 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8376 TRPMResetTrap(pVCpu);
8377 pIemCpu->uInjectCpl = pIemCpu->uCpl;
8378 }
8379
8380 /*
8381 * Reset the counters.
8382 */
8383 pIemCpu->cIOReads = 0;
8384 pIemCpu->cIOWrites = 0;
8385 pIemCpu->fIgnoreRaxRdx = false;
8386 pIemCpu->fOverlappingMovs = false;
8387 pIemCpu->fUndefinedEFlags = 0;
8388
8389 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8390 {
8391 /*
8392 * Free all verification records.
8393 */
8394 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
8395 pIemCpu->pIemEvtRecHead = NULL;
8396 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
8397 do
8398 {
8399 while (pEvtRec)
8400 {
8401 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
8402 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
8403 pIemCpu->pFreeEvtRec = pEvtRec;
8404 pEvtRec = pNext;
8405 }
8406 pEvtRec = pIemCpu->pOtherEvtRecHead;
8407 pIemCpu->pOtherEvtRecHead = NULL;
8408 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
8409 } while (pEvtRec);
8410 }
8411}
8412
8413
8414/**
8415 * Allocate an event record.
8416 * @returns Pointer to a record.
8417 */
8418static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
8419{
8420 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8421 return NULL;
8422
8423 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
8424 if (pEvtRec)
8425 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
8426 else
8427 {
8428 if (!pIemCpu->ppIemEvtRecNext)
8429 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
8430
8431 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
8432 if (!pEvtRec)
8433 return NULL;
8434 }
8435 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
8436 pEvtRec->pNext = NULL;
8437 return pEvtRec;
8438}
8439
8440
8441/**
8442 * IOMMMIORead notification.
8443 */
8444VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
8445{
8446 PVMCPU pVCpu = VMMGetCpu(pVM);
8447 if (!pVCpu)
8448 return;
8449 PIEMCPU pIemCpu = &pVCpu->iem.s;
8450 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8451 if (!pEvtRec)
8452 return;
8453 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8454 pEvtRec->u.RamRead.GCPhys = GCPhys;
8455 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
8456 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8457 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8458}
8459
8460
8461/**
8462 * IOMMMIOWrite notification.
8463 */
8464VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
8465{
8466 PVMCPU pVCpu = VMMGetCpu(pVM);
8467 if (!pVCpu)
8468 return;
8469 PIEMCPU pIemCpu = &pVCpu->iem.s;
8470 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8471 if (!pEvtRec)
8472 return;
8473 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8474 pEvtRec->u.RamWrite.GCPhys = GCPhys;
8475 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
8476 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
8477 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
8478 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
8479 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
8480 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8481 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8482}
8483
8484
8485/**
8486 * IOMIOPortRead notification.
8487 */
8488VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
8489{
8490 PVMCPU pVCpu = VMMGetCpu(pVM);
8491 if (!pVCpu)
8492 return;
8493 PIEMCPU pIemCpu = &pVCpu->iem.s;
8494 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8495 if (!pEvtRec)
8496 return;
8497 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8498 pEvtRec->u.IOPortRead.Port = Port;
8499 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8500 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8501 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8502}
8503
8504/**
8505 * IOMIOPortWrite notification.
8506 */
8507VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8508{
8509 PVMCPU pVCpu = VMMGetCpu(pVM);
8510 if (!pVCpu)
8511 return;
8512 PIEMCPU pIemCpu = &pVCpu->iem.s;
8513 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8514 if (!pEvtRec)
8515 return;
8516 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8517 pEvtRec->u.IOPortWrite.Port = Port;
8518 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8519 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8520 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8521 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8522}
8523
8524
8525VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
8526{
8527 AssertFailed();
8528}
8529
8530
8531VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
8532{
8533 AssertFailed();
8534}
8535
8536
8537/**
8538 * Fakes and records an I/O port read.
8539 *
8540 * @returns VINF_SUCCESS.
8541 * @param pIemCpu The IEM per CPU data.
8542 * @param Port The I/O port.
8543 * @param pu32Value Where to store the fake value.
8544 * @param cbValue The size of the access.
8545 */
8546static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8547{
8548 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8549 if (pEvtRec)
8550 {
8551 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8552 pEvtRec->u.IOPortRead.Port = Port;
8553 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8554 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8555 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8556 }
8557 pIemCpu->cIOReads++;
8558 *pu32Value = 0xcccccccc;
8559 return VINF_SUCCESS;
8560}
8561
8562
8563/**
8564 * Fakes and records an I/O port write.
8565 *
8566 * @returns VINF_SUCCESS.
8567 * @param pIemCpu The IEM per CPU data.
8568 * @param Port The I/O port.
8569 * @param u32Value The value being written.
8570 * @param cbValue The size of the access.
8571 */
8572static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8573{
8574 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8575 if (pEvtRec)
8576 {
8577 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8578 pEvtRec->u.IOPortWrite.Port = Port;
8579 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8580 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8581 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8582 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8583 }
8584 pIemCpu->cIOWrites++;
8585 return VINF_SUCCESS;
8586}
8587
8588
8589/**
8590 * Used to add extra details (register state and disassembly) to an assertion.
8591 * @param pIemCpu The IEM per CPU state.
8592 */
8593static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
8594{
8595 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8596 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8597 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8598 char szRegs[4096];
8599 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
8600 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
8601 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
8602 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
8603 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
8604 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
8605 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
8606 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
8607 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
8608 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
8609 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
8610 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
8611 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
8612 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
8613 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
8614 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
8615 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
8616 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
8617 " efer=%016VR{efer}\n"
8618 " pat=%016VR{pat}\n"
8619 " sf_mask=%016VR{sf_mask}\n"
8620 "krnl_gs_base=%016VR{krnl_gs_base}\n"
8621 " lstar=%016VR{lstar}\n"
8622 " star=%016VR{star} cstar=%016VR{cstar}\n"
8623 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
8624 );
8625
8626 char szInstr1[256];
8627 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
8628 DBGF_DISAS_FLAGS_DEFAULT_MODE,
8629 szInstr1, sizeof(szInstr1), NULL);
8630 char szInstr2[256];
8631 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
8632 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8633 szInstr2, sizeof(szInstr2), NULL);
8634
8635 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
8636}
8637
8638
8639/**
8640 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
8641 * dump to the assertion info.
8642 *
8643 * @param pEvtRec The record to dump.
8644 */
8645static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
8646{
8647 switch (pEvtRec->enmEvent)
8648 {
8649 case IEMVERIFYEVENT_IOPORT_READ:
8650 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
8651                         pEvtRec->u.IOPortRead.Port,
8652                         pEvtRec->u.IOPortRead.cbValue);
8653 break;
8654 case IEMVERIFYEVENT_IOPORT_WRITE:
8655 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
8656 pEvtRec->u.IOPortWrite.Port,
8657 pEvtRec->u.IOPortWrite.cbValue,
8658 pEvtRec->u.IOPortWrite.u32Value);
8659 break;
8660 case IEMVERIFYEVENT_RAM_READ:
8661 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
8662 pEvtRec->u.RamRead.GCPhys,
8663 pEvtRec->u.RamRead.cb);
8664 break;
8665 case IEMVERIFYEVENT_RAM_WRITE:
8666 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
8667 pEvtRec->u.RamWrite.GCPhys,
8668 pEvtRec->u.RamWrite.cb,
8669 (int)pEvtRec->u.RamWrite.cb,
8670 pEvtRec->u.RamWrite.ab);
8671 break;
8672 default:
8673 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
8674 break;
8675 }
8676}
8677
8678
8679/**
8680 * Raises an assertion on the specified records, showing the given message with
8681 * dumps of both records attached.
8682 *
8683 * @param pIemCpu The IEM per CPU data.
8684 * @param pEvtRec1 The first record.
8685 * @param pEvtRec2 The second record.
8686 * @param pszMsg The message explaining why we're asserting.
8687 */
8688static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
8689{
8690 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8691 iemVerifyAssertAddRecordDump(pEvtRec1);
8692 iemVerifyAssertAddRecordDump(pEvtRec2);
8693 iemVerifyAssertMsg2(pIemCpu);
8694 RTAssertPanic();
8695}
8696
8697
8698/**
8699 * Raises an assertion on the specified record, showing the given message with
8700 * a record dump attached.
8701 *
8702 * @param pIemCpu The IEM per CPU data.
8703 * @param pEvtRec The record.
8704 * @param pszMsg The message explaining why we're asserting.
8705 */
8706static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
8707{
8708 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8709 iemVerifyAssertAddRecordDump(pEvtRec);
8710 iemVerifyAssertMsg2(pIemCpu);
8711 RTAssertPanic();
8712}
8713
8714
8715/**
8716 * Verifies a write record.
8717 *
8718 * @param pIemCpu The IEM per CPU data.
8719 * @param pEvtRec The write record.
8720 */
8721static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
8722{
8723 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
8724 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
8725 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
8726 if ( RT_FAILURE(rc)
8727 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
8728 {
8729 /* fend off ins */
8730 if ( !pIemCpu->cIOReads
8731 || pEvtRec->u.RamWrite.ab[0] != 0xcc
8732 || ( pEvtRec->u.RamWrite.cb != 1
8733 && pEvtRec->u.RamWrite.cb != 2
8734 && pEvtRec->u.RamWrite.cb != 4) )
8735 {
8736 /* fend off ROMs */
8737 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
8738 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
8739 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
8740 {
8741 /* fend off fxsave */
8742 if (pEvtRec->u.RamWrite.cb != 512)
8743 {
8744 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8745 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
8746 RTAssertMsg2Add("REM: %.*Rhxs\n"
8747 "IEM: %.*Rhxs\n",
8748 pEvtRec->u.RamWrite.cb, abBuf,
8749 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
8750 iemVerifyAssertAddRecordDump(pEvtRec);
8751 iemVerifyAssertMsg2(pIemCpu);
8752 RTAssertPanic();
8753 }
8754 }
8755 }
8756 }
8757
8758}
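/* Editor's note, not part of the original source: the three "fend off ROMs"
 * range checks above use the unsigned-subtraction idiom; they skip writes
 * falling roughly in 0xC0000-0xC8000 (VGA BIOS), 0xE0000-0xFFFFF (system BIOS
 * area) and 0xFFFC0000-0xFFFFFFFF (high BIOS mapping), where REM and IEM
 * are expected to diverge harmlessly. */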
8759
8760/**
8761 * Performs the post-execution verification checks.
8762 */
8763static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
8764{
8765 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8766 return;
8767
8768 /*
8769 * Switch back the state.
8770 */
8771 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
8772 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
8773 Assert(pOrgCtx != pDebugCtx);
8774 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8775
8776 /*
8777 * Execute the instruction in REM.
8778 */
8779 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8780 EMRemLock(pVM);
8781 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
8782 AssertRC(rc);
8783 EMRemUnlock(pVM);
8784
8785 /*
8786 * Compare the register states.
8787 */
8788 unsigned cDiffs = 0;
8789 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
8790 {
8791 //Log(("REM and IEM ends up with different registers!\n"));
8792
8793# define CHECK_FIELD(a_Field) \
8794 do \
8795 { \
8796 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8797 { \
8798 switch (sizeof(pOrgCtx->a_Field)) \
8799 { \
8800 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8801 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8802 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8803 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8804 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
8805 } \
8806 cDiffs++; \
8807 } \
8808 } while (0)
8809
8810# define CHECK_BIT_FIELD(a_Field) \
8811 do \
8812 { \
8813 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8814 { \
8815 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8816 cDiffs++; \
8817 } \
8818 } while (0)
8819
8820# define CHECK_SEL(a_Sel) \
8821 do \
8822 { \
8823 CHECK_FIELD(a_Sel.Sel); \
8824 CHECK_FIELD(a_Sel.Attr.u); \
8825 CHECK_FIELD(a_Sel.u64Base); \
8826 CHECK_FIELD(a_Sel.u32Limit); \
8827 CHECK_FIELD(a_Sel.fFlags); \
8828 } while (0)
8829
8830#if 1 /* The recompiler doesn't update these the intel way. */
8831 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8832 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8833 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8834 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8835 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8836 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8837 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8838 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8839 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8840 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8841#endif
8842 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8843 {
8844 RTAssertMsg2Weak(" the FPU state differs\n");
8845 cDiffs++;
8846 CHECK_FIELD(fpu.FCW);
8847 CHECK_FIELD(fpu.FSW);
8848 CHECK_FIELD(fpu.FTW);
8849 CHECK_FIELD(fpu.FOP);
8850 CHECK_FIELD(fpu.FPUIP);
8851 CHECK_FIELD(fpu.CS);
8852 CHECK_FIELD(fpu.Rsrvd1);
8853 CHECK_FIELD(fpu.FPUDP);
8854 CHECK_FIELD(fpu.DS);
8855 CHECK_FIELD(fpu.Rsrvd2);
8856 CHECK_FIELD(fpu.MXCSR);
8857 CHECK_FIELD(fpu.MXCSR_MASK);
8858 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8859 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8860 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8861 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8862 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8863 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8864 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8865 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8866 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8867 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8868 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8869 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8870 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8871 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8872 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8873 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8874 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8875 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8876 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8877 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8878 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8879 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8880 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8881 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8882 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8883 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8884 }
8885 CHECK_FIELD(rip);
8886 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8887 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8888 {
8889 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8890 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8891 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8892 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8893 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8894 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8895 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8896 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8897 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8898 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8899 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8900 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8901 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8902 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8903 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8904 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8905 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8906 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8907 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8908 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8909 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8910 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8911 }
8912
8913 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8914 CHECK_FIELD(rax);
8915 CHECK_FIELD(rcx);
8916 if (!pIemCpu->fIgnoreRaxRdx)
8917 CHECK_FIELD(rdx);
8918 CHECK_FIELD(rbx);
8919 CHECK_FIELD(rsp);
8920 CHECK_FIELD(rbp);
8921 CHECK_FIELD(rsi);
8922 CHECK_FIELD(rdi);
8923 CHECK_FIELD(r8);
8924 CHECK_FIELD(r9);
8925 CHECK_FIELD(r10);
8926 CHECK_FIELD(r11);
8927 CHECK_FIELD(r12);
8928 CHECK_FIELD(r13);
8929 CHECK_SEL(cs);
8930 CHECK_SEL(ss);
8931 CHECK_SEL(ds);
8932 CHECK_SEL(es);
8933 CHECK_SEL(fs);
8934 CHECK_SEL(gs);
8935 CHECK_FIELD(cr0);
8936        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
8937           the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
8938        /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
8939           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
8940 if (pOrgCtx->cr2 != pDebugCtx->cr2)
8941 {
8942 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
8943 { /* ignore */ }
8944 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
8945 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
8946 { /* ignore */ }
8947 else
8948 CHECK_FIELD(cr2);
8949 }
8950 CHECK_FIELD(cr3);
8951 CHECK_FIELD(cr4);
8952 CHECK_FIELD(dr[0]);
8953 CHECK_FIELD(dr[1]);
8954 CHECK_FIELD(dr[2]);
8955 CHECK_FIELD(dr[3]);
8956 CHECK_FIELD(dr[6]);
8957 if ((pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
8958 CHECK_FIELD(dr[7]);
8959 CHECK_FIELD(gdtr.cbGdt);
8960 CHECK_FIELD(gdtr.pGdt);
8961 CHECK_FIELD(idtr.cbIdt);
8962 CHECK_FIELD(idtr.pIdt);
8963 CHECK_SEL(ldtr);
8964 CHECK_SEL(tr);
8965 CHECK_FIELD(SysEnter.cs);
8966 CHECK_FIELD(SysEnter.eip);
8967 CHECK_FIELD(SysEnter.esp);
8968 CHECK_FIELD(msrEFER);
8969 CHECK_FIELD(msrSTAR);
8970 CHECK_FIELD(msrPAT);
8971 CHECK_FIELD(msrLSTAR);
8972 CHECK_FIELD(msrCSTAR);
8973 CHECK_FIELD(msrSFMASK);
8974 CHECK_FIELD(msrKERNELGSBASE);
8975
8976 if (cDiffs != 0)
8977 {
8978 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
8979 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8980 iemVerifyAssertMsg2(pIemCpu);
8981 RTAssertPanic();
8982 }
8983# undef CHECK_FIELD
8984# undef CHECK_BIT_FIELD
8985 }
8986
8987 /*
8988 * If the register state compared fine, check the verification event
8989 * records.
8990 */
8991 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8992 {
8993 /*
8994         * Compare verification event records.
8995 * - I/O port accesses should be a 1:1 match.
8996 */
8997 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8998 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8999 while (pIemRec && pOtherRec)
9000 {
9001            /* Since we might miss RAM writes and reads, ignore reads and instead
9002               verify that the memory of any extra IEM write records matches RAM. */
9003 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
9004 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
9005 && pIemRec->pNext)
9006 {
9007 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
9008 iemVerifyWriteRecord(pIemCpu, pIemRec);
9009 pIemRec = pIemRec->pNext;
9010 }
9011
9012 /* Do the compare. */
9013 if (pIemRec->enmEvent != pOtherRec->enmEvent)
9014 {
9015 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
9016 break;
9017 }
9018 bool fEquals;
9019 switch (pIemRec->enmEvent)
9020 {
9021 case IEMVERIFYEVENT_IOPORT_READ:
9022 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
9023 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
9024 break;
9025 case IEMVERIFYEVENT_IOPORT_WRITE:
9026 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
9027 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
9028 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
9029 break;
9030 case IEMVERIFYEVENT_RAM_READ:
9031 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
9032 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
9033 break;
9034 case IEMVERIFYEVENT_RAM_WRITE:
9035 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
9036 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
9037 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
9038 break;
9039 default:
9040 fEquals = false;
9041 break;
9042 }
9043 if (!fEquals)
9044 {
9045 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
9046 break;
9047 }
9048
9049 /* advance */
9050 pIemRec = pIemRec->pNext;
9051 pOtherRec = pOtherRec->pNext;
9052 }
9053
9054 /* Ignore extra writes and reads. */
9055 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
9056 {
9057 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
9058 iemVerifyWriteRecord(pIemCpu, pIemRec);
9059 pIemRec = pIemRec->pNext;
9060 }
9061 if (pIemRec != NULL)
9062 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
9063 else if (pOtherRec != NULL)
9064 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
9065 }
9066 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
9067}
9068
9069#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
9070
9071/* stubs */
9072static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9073{
9074 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
9075 return VERR_INTERNAL_ERROR;
9076}
9077
9078static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9079{
9080 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
9081 return VERR_INTERNAL_ERROR;
9082}
9083
9084#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
9085
9086
9087/**
9088 * Makes status code adjustments (pass up from I/O and access handlers)
9089 * as well as maintaining statistics.
9090 *
9091 * @returns Strict VBox status code to pass up.
9092 * @param pIemCpu The IEM per CPU data.
9093 * @param rcStrict The status from executing an instruction.
9094 */
9095DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
9096{
9097 if (rcStrict != VINF_SUCCESS)
9098 {
9099 if (RT_SUCCESS(rcStrict))
9100 {
9101 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
9102 || rcStrict == VINF_IOM_R3_IOPORT_READ
9103 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
9104 || rcStrict == VINF_IOM_R3_MMIO_READ
9105 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
9106 || rcStrict == VINF_IOM_R3_MMIO_WRITE
9107 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9108 int32_t const rcPassUp = pIemCpu->rcPassUp;
9109 if (rcPassUp == VINF_SUCCESS)
9110 pIemCpu->cRetInfStatuses++;
9111 else if ( rcPassUp < VINF_EM_FIRST
9112 || rcPassUp > VINF_EM_LAST
9113 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
9114 {
9115 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
9116 pIemCpu->cRetPassUpStatus++;
9117 rcStrict = rcPassUp;
9118 }
9119 else
9120 {
9121 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
9122 pIemCpu->cRetInfStatuses++;
9123 }
9124 }
9125 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
9126 pIemCpu->cRetAspectNotImplemented++;
9127 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
9128 pIemCpu->cRetInstrNotImplemented++;
9129#ifdef IEM_VERIFICATION_MODE_FULL
9130 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
9131 rcStrict = VINF_SUCCESS;
9132#endif
9133 else
9134 pIemCpu->cRetErrStatuses++;
9135 }
9136 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
9137 {
9138 pIemCpu->cRetPassUpStatus++;
9139 rcStrict = pIemCpu->rcPassUp;
9140 }
9141
9142 return rcStrict;
9143}
9144
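/* Editor's note, not part of the original source: two concrete cases of the
 * fiddling above, read straight from the code - (1) rcStrict is VINF_SUCCESS
 * but an access handler left rcPassUp != VINF_SUCCESS: the final else-if
 * promotes rcPassUp; (2) both are informational: rcPassUp wins when it lies
 * outside the VINF_EM range or is numerically lower than rcStrict, otherwise
 * rcStrict is kept and cRetInfStatuses is bumped. */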
9145
9146/**
9147 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9148 * IEMExecOneWithPrefetchedByPC.
9149 *
9150 * @return Strict VBox status code.
9151 * @param pVCpu The current virtual CPU.
9152 * @param pIemCpu The IEM per CPU data.
9153 * @param fExecuteInhibit If set, execute the instruction following CLI,
9154 * POP SS and MOV SS,GR.
9155 */
9156DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
9157{
9158 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9159 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9160 if (rcStrict == VINF_SUCCESS)
9161 pIemCpu->cInstructions++;
9162//#ifdef DEBUG
9163// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
9164//#endif
9165
9166 /* Execute the next instruction as well if a cli, pop ss or
9167 mov ss, Gr has just completed successfully. */
9168 if ( fExecuteInhibit
9169 && rcStrict == VINF_SUCCESS
9170 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9171 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
9172 {
9173 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
9174 if (rcStrict == VINF_SUCCESS)
9175 {
9176            IEM_OPCODE_GET_NEXT_U8(&b);
9177 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9178 if (rcStrict == VINF_SUCCESS)
9179 pIemCpu->cInstructions++;
9180 }
9181 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
9182 }
9183
9184 /*
9185 * Return value fiddling, statistics and sanity assertions.
9186 */
9187 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9188
9189 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
9190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
9191#if defined(IEM_VERIFICATION_MODE_FULL)
9192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
9193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
9194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
9195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
9196#endif
9197 return rcStrict;
9198}
9199
9200
9201#ifdef IN_RC
9202/**
9203 * Re-enters raw-mode or ensure we return to ring-3.
9204 *
9205 * @returns rcStrict, maybe modified.
9206 * @param pIemCpu The IEM CPU structure.
9207 * @param pVCpu The cross context virtual CPU structure of the caller.
9208 * @param pCtx The current CPU context.
9209 * @param rcStrict The status code returned by the interpreter.
9210 */
9211DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
9212{
9213 if (!pIemCpu->fInPatchCode)
9214 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
9215 return rcStrict;
9216}
9217#endif
9218
9219
9220/**
9221 * Execute one instruction.
9222 *
9223 * @return Strict VBox status code.
9224 * @param pVCpu The current virtual CPU.
9225 */
9226VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
9227{
9228 PIEMCPU pIemCpu = &pVCpu->iem.s;
9229
9230#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9231 iemExecVerificationModeSetup(pIemCpu);
9232#endif
9233#ifdef LOG_ENABLED
9234 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9235# ifdef IN_RING3
9236 if (LogIs2Enabled())
9237 {
9238 char szInstr[256];
9239 uint32_t cbInstr = 0;
9240 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9241 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9242 szInstr, sizeof(szInstr), &cbInstr);
9243
9244 Log2(("**** "
9245 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9246 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9247 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9248 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9249 " %s\n"
9250 ,
9251 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9252 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9253 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9254 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9255 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9256 szInstr));
9257
9258 if (LogIs3Enabled())
9259 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9260 }
9261 else
9262# endif
9263 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9264 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9265#endif
9266
9267 /*
9268 * Do the decoding and emulation.
9269 */
9270 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9271 if (rcStrict == VINF_SUCCESS)
9272 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9273
9274#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9275 /*
9276 * Assert some sanity.
9277 */
9278 iemExecVerificationModeCheck(pIemCpu);
9279#endif
9280#ifdef IN_RC
9281 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9282#endif
9283 if (rcStrict != VINF_SUCCESS)
9284 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9285 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9286 return rcStrict;
9287}
9288
9289
9290VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9291{
9292 PIEMCPU pIemCpu = &pVCpu->iem.s;
9293 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9294 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9295
9296 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9297 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9298 if (rcStrict == VINF_SUCCESS)
9299 {
9300 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9301 if (pcbWritten)
9302 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9303 }
9304
9305#ifdef IN_RC
9306 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9307#endif
9308 return rcStrict;
9309}
9310
9311
9312VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9313 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9314{
9315 PIEMCPU pIemCpu = &pVCpu->iem.s;
9316 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9317 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9318
9319 VBOXSTRICTRC rcStrict;
9320 if ( cbOpcodeBytes
9321 && pCtx->rip == OpcodeBytesPC)
9322 {
9323 iemInitDecoder(pIemCpu, false);
9324 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9325 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9326 rcStrict = VINF_SUCCESS;
9327 }
9328 else
9329 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9330 if (rcStrict == VINF_SUCCESS)
9331 {
9332 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9333 }
9334
9335#ifdef IN_RC
9336 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9337#endif
9338 return rcStrict;
9339}
9340
9341
9342VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9343{
9344 PIEMCPU pIemCpu = &pVCpu->iem.s;
9345 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9346 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9347
9348 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9349 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9350 if (rcStrict == VINF_SUCCESS)
9351 {
9352 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9353 if (pcbWritten)
9354 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9355 }
9356
9357#ifdef IN_RC
9358 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9359#endif
9360 return rcStrict;
9361}
9362
9363
9364VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9365 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9366{
9367 PIEMCPU pIemCpu = &pVCpu->iem.s;
9368 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9369 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9370
9371 VBOXSTRICTRC rcStrict;
9372 if ( cbOpcodeBytes
9373 && pCtx->rip == OpcodeBytesPC)
9374 {
9375 iemInitDecoder(pIemCpu, true);
9376 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9377 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9378 rcStrict = VINF_SUCCESS;
9379 }
9380 else
9381 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9382 if (rcStrict == VINF_SUCCESS)
9383 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9384
9385#ifdef IN_RC
9386 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9387#endif
9388 return rcStrict;
9389}
9390
9391
9392VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
9393{
9394 PIEMCPU pIemCpu = &pVCpu->iem.s;
9395 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9396
9397 /*
9398 * See if there is an interrupt pending in TRPM and inject it if we can.
9399 */
9400#ifdef IEM_VERIFICATION_MODE_FULL
9401 pIemCpu->uInjectCpl = UINT8_MAX;
9402#endif
9403 if ( pCtx->eflags.Bits.u1IF
9404 && TRPMHasTrap(pVCpu)
9405 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
9406 {
9407 uint8_t u8TrapNo;
9408 TRPMEVENT enmType;
9409 RTGCUINT uErrCode;
9410 RTGCPTR uCr2;
9411 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9412 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
9413 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9414 TRPMResetTrap(pVCpu);
9415 }
9416
9417 /*
9418 * Log the state.
9419 */
9420#ifdef LOG_ENABLED
9421# ifdef IN_RING3
9422 if (LogIs2Enabled())
9423 {
9424 char szInstr[256];
9425 uint32_t cbInstr = 0;
9426 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9427 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9428 szInstr, sizeof(szInstr), &cbInstr);
9429
9430 Log2(("**** "
9431 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9432 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9433 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9434 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9435 " %s\n"
9436 ,
9437 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9438 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9439 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9440 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9441 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9442 szInstr));
9443
9444 if (LogIs3Enabled())
9445 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9446 }
9447 else
9448# endif
9449        LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9450 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9451#endif
9452
9453 /*
9454 * Do the decoding and emulation.
9455 */
9456 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9457 if (rcStrict == VINF_SUCCESS)
9458 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9459
9460 /*
9461 * Maybe re-enter raw-mode and log.
9462 */
9463#ifdef IN_RC
9464 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9465#endif
9466 if (rcStrict != VINF_SUCCESS)
9467        LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9468                 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9469 return rcStrict;
9470}
9471
9472
9473
9474/**
9475 * Injects a trap, fault, abort, software interrupt or external interrupt.
9476 *
9477 * The parameter list matches TRPMQueryTrapAll pretty closely.
9478 *
9479 * @returns Strict VBox status code.
9480 * @param pVCpu The current virtual CPU.
9481 * @param u8TrapNo The trap number.
9482 * @param enmType What type is it (trap/fault/abort), software
9483 * interrupt or hardware interrupt.
9484 * @param uErrCode The error code if applicable.
9485 * @param uCr2 The CR2 value if applicable.
9486 */
9487VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
9488{
9489 iemInitDecoder(&pVCpu->iem.s, false);
9490
9491 uint32_t fFlags;
9492 switch (enmType)
9493 {
9494 case TRPM_HARDWARE_INT:
9495 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9496 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9497 uErrCode = uCr2 = 0;
9498 break;
9499
9500 case TRPM_SOFTWARE_INT:
9501 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9502 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9503 uErrCode = uCr2 = 0;
9504 break;
9505
9506 case TRPM_TRAP:
9507 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9508 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9509 if (u8TrapNo == X86_XCPT_PF)
9510 fFlags |= IEM_XCPT_FLAGS_CR2;
9511 switch (u8TrapNo)
9512 {
9513 case X86_XCPT_DF:
9514 case X86_XCPT_TS:
9515 case X86_XCPT_NP:
9516 case X86_XCPT_SS:
9517 case X86_XCPT_PF:
9518 case X86_XCPT_AC:
9519 fFlags |= IEM_XCPT_FLAGS_ERR;
9520 break;
9521 }
9522 break;
9523
9524 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9525 }
9526
9527 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
9528}
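
/*
 * Illustrative only (not part of the checked-in sources): forwarding a pending
 * TRPM event to IEM with IEMInjectTrap, mirroring the pattern used in
 * IEMExecLots above:
 *
 *     uint8_t   u8TrapNo;
 *     TRPMEVENT enmType;
 *     RTGCUINT  uErrCode;
 *     RTGCPTR   uCr2;
 *     int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL); AssertRC(rc2);
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
 *     TRPMResetTrap(pVCpu);
 */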
9529
9530
9531VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9532{
9533 return VERR_NOT_IMPLEMENTED;
9534}
9535
9536
9537VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9538{
9539 return VERR_NOT_IMPLEMENTED;
9540}
9541
9542
9543#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
9544/**
9545 * Executes an IRET instruction with default operand size.
9546 *
9547 * This is for PATM.
9548 *
9549 * @returns VBox status code.
9550 * @param pVCpu The current virtual CPU.
9551 * @param pCtxCore The register frame.
9552 */
9553VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
9554{
9555 PIEMCPU pIemCpu = &pVCpu->iem.s;
9556 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9557
9558 iemCtxCoreToCtx(pCtx, pCtxCore);
9559 iemInitDecoder(pIemCpu);
9560 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
9561 if (rcStrict == VINF_SUCCESS)
9562 iemCtxToCtxCore(pCtxCore, pCtx);
9563 else
9564 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9565 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9566 return rcStrict;
9567}
9568#endif
9569
9570
9571
9572/**
9573 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9574 *
9575 * This API ASSUMES that the caller has already verified that the guest code is
9576 * allowed to access the I/O port. (The I/O port is in the DX register in the
9577 * guest state.)
9578 *
9579 * @returns Strict VBox status code.
9580 * @param pVCpu The cross context per virtual CPU structure.
9581 * @param cbValue The size of the I/O port access (1, 2, or 4).
9582 * @param enmAddrMode The addressing mode.
9583 * @param fRepPrefix Indicates whether a repeat prefix is used
9584 * (doesn't matter which for this instruction).
9585 * @param cbInstr The instruction length in bytes.
9586 * @param   iEffSeg             The effective segment register number (X86_SREG_XXX).
9587 */
9588VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9589 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
9590{
9591 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9592 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
9593
9594 /*
9595 * State init.
9596 */
9597 PIEMCPU pIemCpu = &pVCpu->iem.s;
9598 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
9599
9600 /*
9601 * Switch orgy for getting to the right handler.
9602 */
9603 VBOXSTRICTRC rcStrict;
9604 if (fRepPrefix)
9605 {
9606 switch (enmAddrMode)
9607 {
9608 case IEMMODE_16BIT:
9609 switch (cbValue)
9610 {
9611 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9612 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9613 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9614 default:
9615 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9616 }
9617 break;
9618
9619 case IEMMODE_32BIT:
9620 switch (cbValue)
9621 {
9622 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9623 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9624 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9625 default:
9626 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9627 }
9628 break;
9629
9630 case IEMMODE_64BIT:
9631 switch (cbValue)
9632 {
9633 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9634 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9635 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9636 default:
9637 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9638 }
9639 break;
9640
9641 default:
9642 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9643 }
9644 }
9645 else
9646 {
9647 switch (enmAddrMode)
9648 {
9649 case IEMMODE_16BIT:
9650 switch (cbValue)
9651 {
9652 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9653 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9654 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9655 default:
9656 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9657 }
9658 break;
9659
9660 case IEMMODE_32BIT:
9661 switch (cbValue)
9662 {
9663 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9664 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9665 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9666 default:
9667 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9668 }
9669 break;
9670
9671 case IEMMODE_64BIT:
9672 switch (cbValue)
9673 {
9674 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9675 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9676 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
9677 default:
9678 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9679 }
9680 break;
9681
9682 default:
9683 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9684 }
9685 }
9686
9687 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9688}
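
/*
 * Illustrative only (not part of the checked-in sources): a caller that has
 * already verified access to the I/O port in DX could let IEM emulate a
 * "rep outsb" (cbValue=1) with 16-bit addressing and the default DS source
 * segment like this, taking cbInstr from its own exit/decode information:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_16BIT,
 *                                                  true, cbInstr, X86_SREG_DS);
 */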
9689
9690
9691/**
9692 * Interface for HM and EM for executing string I/O IN (read) instructions.
9693 *
9694 * This API ASSUMES that the caller has already verified that the guest code is
9695 * allowed to access the I/O port. (The I/O port is in the DX register in the
9696 * guest state.)
9697 *
9698 * @returns Strict VBox status code.
9699 * @param pVCpu The cross context per virtual CPU structure.
9700 * @param cbValue The size of the I/O port access (1, 2, or 4).
9701 * @param enmAddrMode The addressing mode.
9702 * @param fRepPrefix Indicates whether a repeat prefix is used
9703 * (doesn't matter which for this instruction).
9704 * @param cbInstr The instruction length in bytes.
9705 */
9706VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9707 bool fRepPrefix, uint8_t cbInstr)
9708{
9709 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
9710
9711 /*
9712 * State init.
9713 */
9714 PIEMCPU pIemCpu = &pVCpu->iem.s;
9715 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
9716
9717 /*
9718 * Switch orgy for getting to the right handler.
9719 */
9720 VBOXSTRICTRC rcStrict;
9721 if (fRepPrefix)
9722 {
9723 switch (enmAddrMode)
9724 {
9725 case IEMMODE_16BIT:
9726 switch (cbValue)
9727 {
9728 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9729 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9730 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9731 default:
9732 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9733 }
9734 break;
9735
9736 case IEMMODE_32BIT:
9737 switch (cbValue)
9738 {
9739 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9740 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9741 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9742 default:
9743 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9744 }
9745 break;
9746
9747 case IEMMODE_64BIT:
9748 switch (cbValue)
9749 {
9750 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9751 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9752 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9753 default:
9754 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9755 }
9756 break;
9757
9758 default:
9759 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9760 }
9761 }
9762 else
9763 {
9764 switch (enmAddrMode)
9765 {
9766 case IEMMODE_16BIT:
9767 switch (cbValue)
9768 {
9769 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9770 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9771 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9772 default:
9773 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9774 }
9775 break;
9776
9777 case IEMMODE_32BIT:
9778 switch (cbValue)
9779 {
9780 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9781 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9782 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9783 default:
9784 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9785 }
9786 break;
9787
9788 case IEMMODE_64BIT:
9789 switch (cbValue)
9790 {
9791 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9792 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9793 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
9794 default:
9795 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9796 }
9797 break;
9798
9799 default:
9800 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9801 }
9802 }
9803
9804 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9805}
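
/*
 * Illustrative only (not part of the checked-in sources): the read-side
 * counterpart, letting IEM emulate a "rep insw" (cbValue=2) with 32-bit
 * addressing; no segment argument is needed as INS always stores to ES:xDI:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, 2, IEMMODE_32BIT,
 *                                                 true, cbInstr);
 */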
9806