VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 47396

Last change on this file since 47396 was 47394, checked in by vboxsync, 12 years ago

IEM: movd/movq overhaul and both directions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 342.2 KB
1/* $Id: IEMAll.cpp 47394 2013-07-25 13:18:51Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
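 * For example, on a debug build the group can be cranked up with something
 * like the following (illustrative only; the exact group/flag syntax is an
 * assumption here, see VBox/log.h and the log group definitions):
 * @code
 *   VBOX_LOG="+iem.e.l2.l3.l4" VBOX_LOG_DEST="file=iem.log" VirtualBox ...
 * @endcode
 *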
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pdm.h>
86#include <VBox/vmm/pgm.h>
87#include <internal/pgm.h>
88#include <VBox/vmm/iom.h>
89#include <VBox/vmm/em.h>
90#include <VBox/vmm/hm.h>
91#include <VBox/vmm/tm.h>
92#include <VBox/vmm/dbgf.h>
93#ifdef VBOX_WITH_RAW_MODE_NOT_R0
94# include <VBox/vmm/patm.h>
95#endif
96#include "IEMInternal.h"
97#ifdef IEM_VERIFICATION_MODE_FULL
98# include <VBox/vmm/rem.h>
99# include <VBox/vmm/mm.h>
100#endif
101#include <VBox/vmm/vm.h>
102#include <VBox/log.h>
103#include <VBox/err.h>
104#include <VBox/param.h>
105#include <iprt/assert.h>
106#include <iprt/string.h>
107#include <iprt/x86.h>
108
109
110/*******************************************************************************
111* Structures and Typedefs *
112*******************************************************************************/
113/** @typedef PFNIEMOP
114 * Pointer to an opcode decoder function.
115 */
116
117/** @def FNIEMOP_DEF
118 * Define an opcode decoder function.
119 *
120 * We're using macros for this so that adding and removing parameters as well as
121 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL and the usage sketch after the definitions below.
122 *
123 * @param a_Name The function name.
124 */
125
126
127#if defined(__GNUC__) && defined(RT_ARCH_X86)
128typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
129# define FNIEMOP_DEF(a_Name) \
130 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
131# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
132 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
133# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
134 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
135
136#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
137typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
138# define FNIEMOP_DEF(a_Name) \
139 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
140# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
141 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
142# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
143 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
144
145#elif defined(__GNUC__)
146typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
147# define FNIEMOP_DEF(a_Name) \
148 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
149# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
150 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
151# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
152 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
153
154#else
155typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
156# define FNIEMOP_DEF(a_Name) \
157 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
158# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
159 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
160# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
161 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
162
163#endif
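
/* A minimal usage sketch (illustrative only; the decoder name iemOp_ExampleUd2
   is hypothetical and not part of the real opcode tables):

       FNIEMOP_DEF(iemOp_ExampleUd2)
       {
           return iemRaiseUndefinedOpcode(pIemCpu);
       }

   Such functions live in tables like g_apfnOneByteMap and are invoked through
   the FNIEMOP_CALL* macros defined further down:

       VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */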
164
165
166/**
167 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
168 */
169typedef union IEMSELDESC
170{
171 /** The legacy view. */
172 X86DESC Legacy;
173 /** The long mode view. */
174 X86DESC64 Long;
175} IEMSELDESC;
176/** Pointer to a selector descriptor table entry. */
177typedef IEMSELDESC *PIEMSELDESC;
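
/* Illustrative only: code that has fetched a descriptor via iemMemFetchSelDesc
   typically inspects the legacy view first, e.g. (sketch, assuming a local
   IEMSELDESC named Desc):

       if (!Desc.Legacy.Gen.u1DescType)
           ...; // system descriptor: TSS, LDT, gate, etc.
 */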
178
179
180/*******************************************************************************
181* Defined Constants And Macros *
182*******************************************************************************/
183/** @name IEM status codes.
184 *
185 * Not quite sure how this will play out in the end, just aliasing safe status
186 * codes for now.
187 *
188 * @{ */
189#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
190/** @} */
191
192/** Temporary hack to disable the double execution. Will be removed in favor
193 * of a dedicated execution mode in EM. */
194//#define IEM_VERIFICATION_MODE_NO_REM
195
196/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
197 * due to GCC lacking knowledge about the value range of a switch. */
198#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
199
200/**
201 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
202 * occasion.
203 */
204#ifdef LOG_ENABLED
205# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
206 do { \
207 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
208 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
209 } while (0)
210#else
211# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
212 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
213#endif
214
215/**
216 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
217 * occasion using the supplied logger statement.
218 *
219 * @param a_LoggerArgs What to log on failure.
220 */
221#ifdef LOG_ENABLED
222# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
223 do { \
224 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
225 /*LogFunc(a_LoggerArgs);*/ \
226 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
227 } while (0)
228#else
229# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
230 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
231#endif
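
/* Illustrative only (sketch): a decoder or helper that runs into an aspect it
   does not implement yet bails out like this (fUnhandledCase is hypothetical):

       if (fUnhandledCase)
           IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unhandled case %#x\n", fUnhandledCase));
 */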
232
233/**
234 * Call an opcode decoder function.
235 *
236 * We're using macros for this so that adding and removing parameters can be
237 * done as we please. See FNIEMOP_DEF.
238 */
239#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
240
241/**
242 * Call a common opcode decoder function taking one extra argument.
243 *
244 * We're using macros for this so that adding and removing parameters can be
245 * done as we please. See FNIEMOP_DEF_1.
246 */
247#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
248
249/**
250 * Call a common opcode decoder function taking two extra arguments.
251 *
252 * We're using macros for this so that adding and removing parameters can be
253 * done as we please. See FNIEMOP_DEF_2.
254 */
255#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
256
257/**
258 * Check if we're currently executing in real or virtual 8086 mode.
259 *
260 * @returns @c true if it is, @c false if not.
261 * @param a_pIemCpu The IEM state of the current CPU.
262 */
263#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
264
265/**
266 * Check if we're currently executing in long mode.
267 *
268 * @returns @c true if it is, @c false if not.
269 * @param a_pIemCpu The IEM state of the current CPU.
270 */
271#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
272
273/**
274 * Check if we're currently executing in real mode.
275 *
276 * @returns @c true if it is, @c false if not.
277 * @param a_pIemCpu The IEM state of the current CPU.
278 */
279#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
280
281/**
282 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
283 */
284#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
285
286/**
287 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
288 */
289#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
290
291/**
292 * Tests if at least one of the specified AMD CPUID features (extended) is
293 * marked present.
294 */
295#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
296
297/**
298 * Checks if an Intel CPUID feature is present.
299 */
300#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
301 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
302 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
303
304/**
305 * Checks if an Intel CPUID feature is present.
306 */
307#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
308 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
309
310/**
311 * Checks if an Intel CPUID feature is present in the host CPU.
312 */
313#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
314 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
315
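/* Illustrative only (sketch): instruction emulations typically gate on these
   macros before touching a feature, e.g. raising #UD for RDTSC when the TSC
   feature is not exposed to the guest:

       if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
           return iemRaiseUndefinedOpcode(pIemCpu);
 */
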
316/**
317 * Evaluates to true if we're presenting an Intel CPU to the guest.
318 */
319#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
320
321/**
322 * Evaluates to true if we're presenting an AMD CPU to the guest.
323 */
324#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
325
326/**
327 * Check if the address is canonical.
328 */
329#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
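
/* The bias trick above folds both canonical ranges into a single unsigned
   compare: 0x0000000000000000..0x00007fffffffffff and
   0xffff800000000000..0xffffffffffffffff pass, while anything in between,
   e.g. 0x0000800000000000, fails. */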
330
331
332/*******************************************************************************
333* Global Variables *
334*******************************************************************************/
335extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
336
337
338/** Function table for the ADD instruction. */
339static const IEMOPBINSIZES g_iemAImpl_add =
340{
341 iemAImpl_add_u8, iemAImpl_add_u8_locked,
342 iemAImpl_add_u16, iemAImpl_add_u16_locked,
343 iemAImpl_add_u32, iemAImpl_add_u32_locked,
344 iemAImpl_add_u64, iemAImpl_add_u64_locked
345};
346
347/** Function table for the ADC instruction. */
348static const IEMOPBINSIZES g_iemAImpl_adc =
349{
350 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
351 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
352 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
353 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
354};
355
356/** Function table for the SUB instruction. */
357static const IEMOPBINSIZES g_iemAImpl_sub =
358{
359 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
360 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
361 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
362 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
363};
364
365/** Function table for the SBB instruction. */
366static const IEMOPBINSIZES g_iemAImpl_sbb =
367{
368 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
369 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
370 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
371 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
372};
373
374/** Function table for the OR instruction. */
375static const IEMOPBINSIZES g_iemAImpl_or =
376{
377 iemAImpl_or_u8, iemAImpl_or_u8_locked,
378 iemAImpl_or_u16, iemAImpl_or_u16_locked,
379 iemAImpl_or_u32, iemAImpl_or_u32_locked,
380 iemAImpl_or_u64, iemAImpl_or_u64_locked
381};
382
383/** Function table for the XOR instruction. */
384static const IEMOPBINSIZES g_iemAImpl_xor =
385{
386 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
387 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
388 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
389 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
390};
391
392/** Function table for the AND instruction. */
393static const IEMOPBINSIZES g_iemAImpl_and =
394{
395 iemAImpl_and_u8, iemAImpl_and_u8_locked,
396 iemAImpl_and_u16, iemAImpl_and_u16_locked,
397 iemAImpl_and_u32, iemAImpl_and_u32_locked,
398 iemAImpl_and_u64, iemAImpl_and_u64_locked
399};
400
401/** Function table for the CMP instruction.
402 * @remarks Making operand order ASSUMPTIONS.
403 */
404static const IEMOPBINSIZES g_iemAImpl_cmp =
405{
406 iemAImpl_cmp_u8, NULL,
407 iemAImpl_cmp_u16, NULL,
408 iemAImpl_cmp_u32, NULL,
409 iemAImpl_cmp_u64, NULL
410};
411
412/** Function table for the TEST instruction.
413 * @remarks Making operand order ASSUMPTIONS.
414 */
415static const IEMOPBINSIZES g_iemAImpl_test =
416{
417 iemAImpl_test_u8, NULL,
418 iemAImpl_test_u16, NULL,
419 iemAImpl_test_u32, NULL,
420 iemAImpl_test_u64, NULL
421};
422
423/** Function table for the BT instruction. */
424static const IEMOPBINSIZES g_iemAImpl_bt =
425{
426 NULL, NULL,
427 iemAImpl_bt_u16, NULL,
428 iemAImpl_bt_u32, NULL,
429 iemAImpl_bt_u64, NULL
430};
431
432/** Function table for the BTC instruction. */
433static const IEMOPBINSIZES g_iemAImpl_btc =
434{
435 NULL, NULL,
436 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
437 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
438 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
439};
440
441/** Function table for the BTR instruction. */
442static const IEMOPBINSIZES g_iemAImpl_btr =
443{
444 NULL, NULL,
445 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
446 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
447 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
448};
449
450/** Function table for the BTS instruction. */
451static const IEMOPBINSIZES g_iemAImpl_bts =
452{
453 NULL, NULL,
454 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
455 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
456 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
457};
458
459/** Function table for the BSF instruction. */
460static const IEMOPBINSIZES g_iemAImpl_bsf =
461{
462 NULL, NULL,
463 iemAImpl_bsf_u16, NULL,
464 iemAImpl_bsf_u32, NULL,
465 iemAImpl_bsf_u64, NULL
466};
467
468/** Function table for the BSR instruction. */
469static const IEMOPBINSIZES g_iemAImpl_bsr =
470{
471 NULL, NULL,
472 iemAImpl_bsr_u16, NULL,
473 iemAImpl_bsr_u32, NULL,
474 iemAImpl_bsr_u64, NULL
475};
476
477/** Function table for the two-operand IMUL instruction. */
478static const IEMOPBINSIZES g_iemAImpl_imul_two =
479{
480 NULL, NULL,
481 iemAImpl_imul_two_u16, NULL,
482 iemAImpl_imul_two_u32, NULL,
483 iemAImpl_imul_two_u64, NULL
484};
485
486/** Group 1 /r lookup table. */
487static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
488{
489 &g_iemAImpl_add,
490 &g_iemAImpl_or,
491 &g_iemAImpl_adc,
492 &g_iemAImpl_sbb,
493 &g_iemAImpl_and,
494 &g_iemAImpl_sub,
495 &g_iemAImpl_xor,
496 &g_iemAImpl_cmp
497};
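
/* Illustrative only (sketch): the group 1 opcodes (0x80..0x83) select their
   worker table from the ModR/M reg field, roughly:

       PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */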
498
499/** Function table for the INC instruction. */
500static const IEMOPUNARYSIZES g_iemAImpl_inc =
501{
502 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
503 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
504 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
505 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
506};
507
508/** Function table for the DEC instruction. */
509static const IEMOPUNARYSIZES g_iemAImpl_dec =
510{
511 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
512 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
513 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
514 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
515};
516
517/** Function table for the NEG instruction. */
518static const IEMOPUNARYSIZES g_iemAImpl_neg =
519{
520 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
521 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
522 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
523 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
524};
525
526/** Function table for the NOT instruction. */
527static const IEMOPUNARYSIZES g_iemAImpl_not =
528{
529 iemAImpl_not_u8, iemAImpl_not_u8_locked,
530 iemAImpl_not_u16, iemAImpl_not_u16_locked,
531 iemAImpl_not_u32, iemAImpl_not_u32_locked,
532 iemAImpl_not_u64, iemAImpl_not_u64_locked
533};
534
535
536/** Function table for the ROL instruction. */
537static const IEMOPSHIFTSIZES g_iemAImpl_rol =
538{
539 iemAImpl_rol_u8,
540 iemAImpl_rol_u16,
541 iemAImpl_rol_u32,
542 iemAImpl_rol_u64
543};
544
545/** Function table for the ROR instruction. */
546static const IEMOPSHIFTSIZES g_iemAImpl_ror =
547{
548 iemAImpl_ror_u8,
549 iemAImpl_ror_u16,
550 iemAImpl_ror_u32,
551 iemAImpl_ror_u64
552};
553
554/** Function table for the RCL instruction. */
555static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
556{
557 iemAImpl_rcl_u8,
558 iemAImpl_rcl_u16,
559 iemAImpl_rcl_u32,
560 iemAImpl_rcl_u64
561};
562
563/** Function table for the RCR instruction. */
564static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
565{
566 iemAImpl_rcr_u8,
567 iemAImpl_rcr_u16,
568 iemAImpl_rcr_u32,
569 iemAImpl_rcr_u64
570};
571
572/** Function table for the SHL instruction. */
573static const IEMOPSHIFTSIZES g_iemAImpl_shl =
574{
575 iemAImpl_shl_u8,
576 iemAImpl_shl_u16,
577 iemAImpl_shl_u32,
578 iemAImpl_shl_u64
579};
580
581/** Function table for the SHR instruction. */
582static const IEMOPSHIFTSIZES g_iemAImpl_shr =
583{
584 iemAImpl_shr_u8,
585 iemAImpl_shr_u16,
586 iemAImpl_shr_u32,
587 iemAImpl_shr_u64
588};
589
590/** Function table for the SAR instruction. */
591static const IEMOPSHIFTSIZES g_iemAImpl_sar =
592{
593 iemAImpl_sar_u8,
594 iemAImpl_sar_u16,
595 iemAImpl_sar_u32,
596 iemAImpl_sar_u64
597};
598
599
600/** Function table for the MUL instruction. */
601static const IEMOPMULDIVSIZES g_iemAImpl_mul =
602{
603 iemAImpl_mul_u8,
604 iemAImpl_mul_u16,
605 iemAImpl_mul_u32,
606 iemAImpl_mul_u64
607};
608
609/** Function table for the IMUL instruction working implicitly on rAX. */
610static const IEMOPMULDIVSIZES g_iemAImpl_imul =
611{
612 iemAImpl_imul_u8,
613 iemAImpl_imul_u16,
614 iemAImpl_imul_u32,
615 iemAImpl_imul_u64
616};
617
618/** Function table for the DIV instruction. */
619static const IEMOPMULDIVSIZES g_iemAImpl_div =
620{
621 iemAImpl_div_u8,
622 iemAImpl_div_u16,
623 iemAImpl_div_u32,
624 iemAImpl_div_u64
625};
626
627/** Function table for the IDIV instruction. */
628static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
629{
630 iemAImpl_idiv_u8,
631 iemAImpl_idiv_u16,
632 iemAImpl_idiv_u32,
633 iemAImpl_idiv_u64
634};
635
636/** Function table for the SHLD instruction */
637static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
638{
639 iemAImpl_shld_u16,
640 iemAImpl_shld_u32,
641 iemAImpl_shld_u64,
642};
643
644/** Function table for the SHRD instruction */
645static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
646{
647 iemAImpl_shrd_u16,
648 iemAImpl_shrd_u32,
649 iemAImpl_shrd_u64,
650};
651
652
653/** Function table for the PUNPCKLBW instruction */
654static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
655/** Function table for the PUNPCKLWD instruction */
656static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
657/** Function table for the PUNPCKLDQ instruction */
658static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
659/** Function table for the PUNPCKLQDQ instruction */
660static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
661
662/** Function table for the PUNPCKHBW instruction */
663static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
664/** Function table for the PUNPCKHWD instruction */
665static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
666/** Function table for the PUNPCKHDQ instruction */
667static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
668/** Function table for the PUNPCKHQDQ instruction */
669static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
670
671/** Function table for the PXOR instruction */
672static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
673
674
675#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
676/** What IEM just wrote. */
677uint8_t g_abIemWrote[256];
678/** How much IEM just wrote. */
679size_t g_cbIemWrote;
680#endif
681
682
683/*******************************************************************************
684* Internal Functions *
685*******************************************************************************/
686static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
687/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
688static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
689static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
690static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
691static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
706static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
707static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
708static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
709static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
710
711#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
712static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
713#endif
714static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
715static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
716
717
718/**
719 * Sets the pass up status.
720 *
721 * @returns VINF_SUCCESS.
722 * @param pIemCpu The per CPU IEM state of the calling thread.
723 * @param rcPassUp The pass up status. Must be informational.
724 * VINF_SUCCESS is not allowed.
725 */
726static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
727{
728 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
729
730 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
731 if (rcOldPassUp == VINF_SUCCESS)
732 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
733 /* If both are EM scheduling code, use EM priority rules. */
734 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
735 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
736 {
737 if (rcPassUp < rcOldPassUp)
738 {
739 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
740 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
741 }
742 else
743 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
744 }
745 /* Override EM scheduling with specific status code. */
746 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
747 {
748 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
749 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
750 }
751 /* Don't override specific status code, first come first served. */
752 else
753 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
754 return VINF_SUCCESS;
755}
756
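/* Illustrative only (sketch): a typical caller converts an informational
   status from a memory or I/O access into VINF_SUCCESS for the current
   instruction while remembering it for the outer execution loop:

       if (rcStrict != VINF_SUCCESS && RT_SUCCESS(rcStrict))
           rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 */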
757
758/**
759 * Initializes the decoder state.
760 *
761 * @param pIemCpu The per CPU IEM state.
762 * @param fBypassHandlers Whether to bypass access handlers.
763 */
764DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
765{
766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
767 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
768
769#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
770 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
771 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
773 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
774 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
778#endif
779
780#ifdef VBOX_WITH_RAW_MODE_NOT_R0
781 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
782#endif
783 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
784#ifdef IEM_VERIFICATION_MODE_FULL
785 if (pIemCpu->uInjectCpl != UINT8_MAX)
786 pIemCpu->uCpl = pIemCpu->uInjectCpl;
787#endif
788 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
789 ? IEMMODE_64BIT
790 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
791 ? IEMMODE_32BIT
792 : IEMMODE_16BIT;
793 pIemCpu->enmCpuMode = enmMode;
794 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
795 pIemCpu->enmEffAddrMode = enmMode;
796 if (enmMode != IEMMODE_64BIT)
797 {
798 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
799 pIemCpu->enmEffOpSize = enmMode;
800 }
801 else
802 {
803 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
804 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
805 }
806 pIemCpu->fPrefixes = 0;
807 pIemCpu->uRexReg = 0;
808 pIemCpu->uRexB = 0;
809 pIemCpu->uRexIndex = 0;
810 pIemCpu->iEffSeg = X86_SREG_DS;
811 pIemCpu->offOpcode = 0;
812 pIemCpu->cbOpcode = 0;
813 pIemCpu->cActiveMappings = 0;
814 pIemCpu->iNextMapping = 0;
815 pIemCpu->rcPassUp = VINF_SUCCESS;
816 pIemCpu->fBypassHandlers = fBypassHandlers;
817#ifdef IN_RC
818 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
819 && pCtx->cs.u64Base == 0
820 && pCtx->cs.u32Limit == UINT32_MAX
821 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
822 if (!pIemCpu->fInPatchCode)
823 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
824#endif
825}
826
827
828/**
829 * Prefetches opcodes the first time, i.e. when starting execution.
830 *
831 * @returns Strict VBox status code.
832 * @param pIemCpu The IEM state.
833 * @param fBypassHandlers Whether to bypass access handlers.
834 */
835static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
836{
837#ifdef IEM_VERIFICATION_MODE_FULL
838 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
839#endif
840 iemInitDecoder(pIemCpu, fBypassHandlers);
841
842 /*
843 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
844 *
845 * First translate CS:rIP to a physical address.
846 */
847 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
848 uint32_t cbToTryRead;
849 RTGCPTR GCPtrPC;
850 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
851 {
852 cbToTryRead = PAGE_SIZE;
853 GCPtrPC = pCtx->rip;
854 if (!IEM_IS_CANONICAL(GCPtrPC))
855 return iemRaiseGeneralProtectionFault0(pIemCpu);
856 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
857 }
858 else
859 {
860 uint32_t GCPtrPC32 = pCtx->eip;
861 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
862 if (GCPtrPC32 > pCtx->cs.u32Limit)
863 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
864 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
865 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
866 }
867
868#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
869 /* Allow interpretation of patch manager code blocks since they can for
870 instance throw #PFs for perfectly good reasons. */
871 if (pIemCpu->fInPatchCode)
872 {
873 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
874 if (cbToTryRead > cbLeftOnPage)
875 cbToTryRead = cbLeftOnPage;
876 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
877 cbToTryRead = sizeof(pIemCpu->abOpcode);
878 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
879 pIemCpu->cbOpcode = cbToTryRead;
880 return VINF_SUCCESS;
881 }
882#endif
883
884 RTGCPHYS GCPhys;
885 uint64_t fFlags;
886 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
887 if (RT_FAILURE(rc))
888 {
889 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
890 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
891 }
892 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
893 {
894 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
895 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
896 }
897 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
898 {
899 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
900 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
901 }
902 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
903 /** @todo Check reserved bits and such stuff. PGM is better at doing
904 * that, so do it when implementing the guest virtual address
905 * TLB... */
906
907#ifdef IEM_VERIFICATION_MODE_FULL
908 /*
909 * Optimistic optimization: Use unconsumed opcode bytes from the previous
910 * instruction.
911 */
912 /** @todo optimize this differently by not using PGMPhysRead. */
913 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
914 pIemCpu->GCPhysOpcodes = GCPhys;
915 if ( offPrevOpcodes < cbOldOpcodes
916 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
917 {
918 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
919 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
920 pIemCpu->cbOpcode = cbNew;
921 return VINF_SUCCESS;
922 }
923#endif
924
925 /*
926 * Read the bytes at this address.
927 */
928 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
929 if (cbToTryRead > cbLeftOnPage)
930 cbToTryRead = cbLeftOnPage;
931 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
932 cbToTryRead = sizeof(pIemCpu->abOpcode);
933 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
934 * doing that. */
935 if (!pIemCpu->fBypassHandlers)
936 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
937 else
938 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
939 if (rc != VINF_SUCCESS)
940 {
941 /** @todo status code handling */
942 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
943 GCPtrPC, GCPhys, cbToTryRead, rc));
944 return rc;
945 }
946 pIemCpu->cbOpcode = cbToTryRead;
947
948 return VINF_SUCCESS;
949}
950
951
952/**
953 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
954 * exception if it fails.
955 *
956 * @returns Strict VBox status code.
957 * @param pIemCpu The IEM state.
958 * @param cbMin The minimum number of additional opcode bytes needed.
959 */
960static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
961{
962 /*
963 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
964 *
965 * First translate CS:rIP to a physical address.
966 */
967 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
968 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
969 uint32_t cbToTryRead;
970 RTGCPTR GCPtrNext;
971 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
972 {
973 cbToTryRead = PAGE_SIZE;
974 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
975 if (!IEM_IS_CANONICAL(GCPtrNext))
976 return iemRaiseGeneralProtectionFault0(pIemCpu);
977 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
978 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
979 }
980 else
981 {
982 uint32_t GCPtrNext32 = pCtx->eip;
983 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
984 GCPtrNext32 += pIemCpu->cbOpcode;
985 if (GCPtrNext32 > pCtx->cs.u32Limit)
986 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
987 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
988 if (cbToTryRead < cbMin - cbLeft)
989 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
990 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
991 }
992
993 RTGCPHYS GCPhys;
994 uint64_t fFlags;
995 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
996 if (RT_FAILURE(rc))
997 {
998 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
999 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1000 }
1001 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1002 {
1003 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1004 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1005 }
1006 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1007 {
1008 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1009 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1010 }
1011 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1012 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1013 /** @todo Check reserved bits and such stuff. PGM is better at doing
1014 * that, so do it when implementing the guest virtual address
1015 * TLB... */
1016
1017 /*
1018 * Read the bytes at this address.
1019 */
1020 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1021 if (cbToTryRead > cbLeftOnPage)
1022 cbToTryRead = cbLeftOnPage;
1023 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1024 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1025 Assert(cbToTryRead >= cbMin - cbLeft);
1026 if (!pIemCpu->fBypassHandlers)
1027 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1028 else
1029 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1030 if (rc != VINF_SUCCESS)
1031 {
1032 /** @todo status code handling */
1033 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1034 return rc;
1035 }
1036 pIemCpu->cbOpcode += cbToTryRead;
1037 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1038
1039 return VINF_SUCCESS;
1040}
1041
1042
1043/**
1044 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1045 *
1046 * @returns Strict VBox status code.
1047 * @param pIemCpu The IEM state.
1048 * @param pb Where to return the opcode byte.
1049 */
1050DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1051{
1052 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1053 if (rcStrict == VINF_SUCCESS)
1054 {
1055 uint8_t offOpcode = pIemCpu->offOpcode;
1056 *pb = pIemCpu->abOpcode[offOpcode];
1057 pIemCpu->offOpcode = offOpcode + 1;
1058 }
1059 else
1060 *pb = 0;
1061 return rcStrict;
1062}
1063
1064
1065/**
1066 * Fetches the next opcode byte.
1067 *
1068 * @returns Strict VBox status code.
1069 * @param pIemCpu The IEM state.
1070 * @param pu8 Where to return the opcode byte.
1071 */
1072DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1073{
1074 uint8_t const offOpcode = pIemCpu->offOpcode;
1075 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1076 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1077
1078 *pu8 = pIemCpu->abOpcode[offOpcode];
1079 pIemCpu->offOpcode = offOpcode + 1;
1080 return VINF_SUCCESS;
1081}
1082
1083
1084/**
1085 * Fetches the next opcode byte, returns automatically on failure.
1086 *
1087 * @param a_pu8 Where to return the opcode byte.
1088 * @remark Implicitly references pIemCpu.
1089 */
1090#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1091 do \
1092 { \
1093 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1094 if (rcStrict2 != VINF_SUCCESS) \
1095 return rcStrict2; \
1096 } while (0)
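
/* Illustrative only (sketch): this is how the instruction decoders pull ModR/M
   bytes and immediates off the opcode stream:

       uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 */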
1097
1098
1099/**
1100 * Fetches the next signed byte from the opcode stream.
1101 *
1102 * @returns Strict VBox status code.
1103 * @param pIemCpu The IEM state.
1104 * @param pi8 Where to return the signed byte.
1105 */
1106DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1107{
1108 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1109}
1110
1111
1112/**
1113 * Fetches the next signed byte from the opcode stream, returning automatically
1114 * on failure.
1115 *
1116 * @param a_pi8 Where to return the signed byte.
1117 * @remark Implicitly references pIemCpu.
1118 */
1119#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1120 do \
1121 { \
1122 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1123 if (rcStrict2 != VINF_SUCCESS) \
1124 return rcStrict2; \
1125 } while (0)
1126
1127
1128/**
1129 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1130 *
1131 * @returns Strict VBox status code.
1132 * @param pIemCpu The IEM state.
1133 * @param pu16 Where to return the opcode word.
1134 */
1135DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1136{
1137 uint8_t u8;
1138 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1139 if (rcStrict == VINF_SUCCESS)
1140 *pu16 = (int8_t)u8;
1141 return rcStrict;
1142}
1143
1144
1145/**
1146 * Fetches the next signed byte from the opcode stream, extending it to
1147 * unsigned 16-bit.
1148 *
1149 * @returns Strict VBox status code.
1150 * @param pIemCpu The IEM state.
1151 * @param pu16 Where to return the unsigned word.
1152 */
1153DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1154{
1155 uint8_t const offOpcode = pIemCpu->offOpcode;
1156 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1157 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1158
1159 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1160 pIemCpu->offOpcode = offOpcode + 1;
1161 return VINF_SUCCESS;
1162}
1163
1164
1165/**
1166 * Fetches the next signed byte from the opcode stream, sign extending it to
1167 * a word, returning automatically on failure.
1168 *
1169 * @param a_pu16 Where to return the word.
1170 * @remark Implicitly references pIemCpu.
1171 */
1172#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1173 do \
1174 { \
1175 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1176 if (rcStrict2 != VINF_SUCCESS) \
1177 return rcStrict2; \
1178 } while (0)
1179
1180
1181/**
1182 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1183 *
1184 * @returns Strict VBox status code.
1185 * @param pIemCpu The IEM state.
1186 * @param pu32 Where to return the opcode dword.
1187 */
1188DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1189{
1190 uint8_t u8;
1191 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1192 if (rcStrict == VINF_SUCCESS)
1193 *pu32 = (int8_t)u8;
1194 return rcStrict;
1195}
1196
1197
1198/**
1199 * Fetches the next signed byte from the opcode stream, extending it to
1200 * unsigned 32-bit.
1201 *
1202 * @returns Strict VBox status code.
1203 * @param pIemCpu The IEM state.
1204 * @param pu32 Where to return the unsigned dword.
1205 */
1206DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1207{
1208 uint8_t const offOpcode = pIemCpu->offOpcode;
1209 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1210 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1211
1212 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1213 pIemCpu->offOpcode = offOpcode + 1;
1214 return VINF_SUCCESS;
1215}
1216
1217
1218/**
1219 * Fetches the next signed byte from the opcode stream, sign extending it to
1220 * a double word, returning automatically on failure.
1221 *
1222 * @param a_pu32 Where to return the double word.
1223 * @remark Implicitly references pIemCpu.
1224 */
1225#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1226 do \
1227 { \
1228 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1229 if (rcStrict2 != VINF_SUCCESS) \
1230 return rcStrict2; \
1231 } while (0)
1232
1233
1234/**
1235 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1236 *
1237 * @returns Strict VBox status code.
1238 * @param pIemCpu The IEM state.
1239 * @param pu64 Where to return the opcode qword.
1240 */
1241DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1242{
1243 uint8_t u8;
1244 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1245 if (rcStrict == VINF_SUCCESS)
1246 *pu64 = (int8_t)u8;
1247 return rcStrict;
1248}
1249
1250
1251/**
1252 * Fetches the next signed byte from the opcode stream, extending it to
1253 * unsigned 64-bit.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pu64 Where to return the unsigned qword.
1258 */
1259DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1260{
1261 uint8_t const offOpcode = pIemCpu->offOpcode;
1262 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1263 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1264
1265 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1266 pIemCpu->offOpcode = offOpcode + 1;
1267 return VINF_SUCCESS;
1268}
1269
1270
1271/**
1272 * Fetches the next signed byte from the opcode stream, sign extending it to
1273 * a quad word, returning automatically on failure.
1274 *
1275 * @param a_pu64 Where to return the quad word.
1276 * @remark Implicitly references pIemCpu.
1277 */
1278#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1279 do \
1280 { \
1281 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1282 if (rcStrict2 != VINF_SUCCESS) \
1283 return rcStrict2; \
1284 } while (0)
1285
1286
1287/**
1288 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1289 *
1290 * @returns Strict VBox status code.
1291 * @param pIemCpu The IEM state.
1292 * @param pu16 Where to return the opcode word.
1293 */
1294DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1295{
1296 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1297 if (rcStrict == VINF_SUCCESS)
1298 {
1299 uint8_t offOpcode = pIemCpu->offOpcode;
1300 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1301 pIemCpu->offOpcode = offOpcode + 2;
1302 }
1303 else
1304 *pu16 = 0;
1305 return rcStrict;
1306}
1307
1308
1309/**
1310 * Fetches the next opcode word.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pu16 Where to return the opcode word.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1317{
1318 uint8_t const offOpcode = pIemCpu->offOpcode;
1319 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1320 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1321
1322 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1323 pIemCpu->offOpcode = offOpcode + 2;
1324 return VINF_SUCCESS;
1325}
1326
1327
1328/**
1329 * Fetches the next opcode word, returns automatically on failure.
1330 *
1331 * @param a_pu16 Where to return the opcode word.
1332 * @remark Implicitly references pIemCpu.
1333 */
1334#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1335 do \
1336 { \
1337 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1338 if (rcStrict2 != VINF_SUCCESS) \
1339 return rcStrict2; \
1340 } while (0)
1341
1342
1343/**
1344 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1345 *
1346 * @returns Strict VBox status code.
1347 * @param pIemCpu The IEM state.
1348 * @param pu32 Where to return the opcode double word.
1349 */
1350DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1351{
1352 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1353 if (rcStrict == VINF_SUCCESS)
1354 {
1355 uint8_t offOpcode = pIemCpu->offOpcode;
1356 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1357 pIemCpu->offOpcode = offOpcode + 2;
1358 }
1359 else
1360 *pu32 = 0;
1361 return rcStrict;
1362}
1363
1364
1365/**
1366 * Fetches the next opcode word, zero extending it to a double word.
1367 *
1368 * @returns Strict VBox status code.
1369 * @param pIemCpu The IEM state.
1370 * @param pu32 Where to return the opcode double word.
1371 */
1372DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1373{
1374 uint8_t const offOpcode = pIemCpu->offOpcode;
1375 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1376 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1377
1378 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1379 pIemCpu->offOpcode = offOpcode + 2;
1380 return VINF_SUCCESS;
1381}
1382
1383
1384/**
1385 * Fetches the next opcode word and zero extends it to a double word, returns
1386 * automatically on failure.
1387 *
1388 * @param a_pu32 Where to return the opcode double word.
1389 * @remark Implicitly references pIemCpu.
1390 */
1391#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1392 do \
1393 { \
1394 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1395 if (rcStrict2 != VINF_SUCCESS) \
1396 return rcStrict2; \
1397 } while (0)
1398
1399
1400/**
1401 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pIemCpu The IEM state.
1405 * @param pu64 Where to return the opcode quad word.
1406 */
1407DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1408{
1409 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1410 if (rcStrict == VINF_SUCCESS)
1411 {
1412 uint8_t offOpcode = pIemCpu->offOpcode;
1413 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1414 pIemCpu->offOpcode = offOpcode + 2;
1415 }
1416 else
1417 *pu64 = 0;
1418 return rcStrict;
1419}
1420
1421
1422/**
1423 * Fetches the next opcode word, zero extending it to a quad word.
1424 *
1425 * @returns Strict VBox status code.
1426 * @param pIemCpu The IEM state.
1427 * @param pu64 Where to return the opcode quad word.
1428 */
1429DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1430{
1431 uint8_t const offOpcode = pIemCpu->offOpcode;
1432 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1433 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1434
1435 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1436 pIemCpu->offOpcode = offOpcode + 2;
1437 return VINF_SUCCESS;
1438}
1439
1440
1441/**
1442 * Fetches the next opcode word and zero extends it to a quad word, returns
1443 * automatically on failure.
1444 *
1445 * @param a_pu64 Where to return the opcode quad word.
1446 * @remark Implicitly references pIemCpu.
1447 */
1448#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1449 do \
1450 { \
1451 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1452 if (rcStrict2 != VINF_SUCCESS) \
1453 return rcStrict2; \
1454 } while (0)
1455
1456
1457/**
1458 * Fetches the next signed word from the opcode stream.
1459 *
1460 * @returns Strict VBox status code.
1461 * @param pIemCpu The IEM state.
1462 * @param pi16 Where to return the signed word.
1463 */
1464DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1465{
1466 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1467}
1468
1469
1470/**
1471 * Fetches the next signed word from the opcode stream, returning automatically
1472 * on failure.
1473 *
1474 * @param a_pi16 Where to return the signed word.
1475 * @remark Implicitly references pIemCpu.
1476 */
1477#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1478 do \
1479 { \
1480 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1481 if (rcStrict2 != VINF_SUCCESS) \
1482 return rcStrict2; \
1483 } while (0)
1484
1485
1486/**
1487 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pIemCpu The IEM state.
1491 * @param pu32 Where to return the opcode dword.
1492 */
1493DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1494{
1495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1496 if (rcStrict == VINF_SUCCESS)
1497 {
1498 uint8_t offOpcode = pIemCpu->offOpcode;
1499 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1500 pIemCpu->abOpcode[offOpcode + 1],
1501 pIemCpu->abOpcode[offOpcode + 2],
1502 pIemCpu->abOpcode[offOpcode + 3]);
1503 pIemCpu->offOpcode = offOpcode + 4;
1504 }
1505 else
1506 *pu32 = 0;
1507 return rcStrict;
1508}
1509
1510
1511/**
1512 * Fetches the next opcode dword.
1513 *
1514 * @returns Strict VBox status code.
1515 * @param pIemCpu The IEM state.
1516 * @param pu32 Where to return the opcode double word.
1517 */
1518DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1519{
1520 uint8_t const offOpcode = pIemCpu->offOpcode;
1521 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1522 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1523
1524 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1525 pIemCpu->abOpcode[offOpcode + 1],
1526 pIemCpu->abOpcode[offOpcode + 2],
1527 pIemCpu->abOpcode[offOpcode + 3]);
1528 pIemCpu->offOpcode = offOpcode + 4;
1529 return VINF_SUCCESS;
1530}
1531
1532
1533/**
1534 * Fetches the next opcode dword, returns automatically on failure.
1535 *
1536 * @param a_pu32 Where to return the opcode dword.
1537 * @remark Implicitly references pIemCpu.
1538 */
1539#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1540 do \
1541 { \
1542 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1543 if (rcStrict2 != VINF_SUCCESS) \
1544 return rcStrict2; \
1545 } while (0)
1546
1547
1548/**
1549 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1550 *
1551 * @returns Strict VBox status code.
1552 * @param pIemCpu The IEM state.
1553 * @param pu64 Where to return the opcode quad word (zero extended dword).
1554 */
1555DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1556{
1557 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1558 if (rcStrict == VINF_SUCCESS)
1559 {
1560 uint8_t offOpcode = pIemCpu->offOpcode;
1561 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1562 pIemCpu->abOpcode[offOpcode + 1],
1563 pIemCpu->abOpcode[offOpcode + 2],
1564 pIemCpu->abOpcode[offOpcode + 3]);
1565 pIemCpu->offOpcode = offOpcode + 4;
1566 }
1567 else
1568 *pu64 = 0;
1569 return rcStrict;
1570}
1571
1572
1573/**
1574 * Fetches the next opcode dword, zero extending it to a quad word.
1575 *
1576 * @returns Strict VBox status code.
1577 * @param pIemCpu The IEM state.
1578 * @param pu64 Where to return the opcode quad word.
1579 */
1580DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1581{
1582 uint8_t const offOpcode = pIemCpu->offOpcode;
1583 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1584 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1585
1586 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1587 pIemCpu->abOpcode[offOpcode + 1],
1588 pIemCpu->abOpcode[offOpcode + 2],
1589 pIemCpu->abOpcode[offOpcode + 3]);
1590 pIemCpu->offOpcode = offOpcode + 4;
1591 return VINF_SUCCESS;
1592}
1593
1594
1595/**
1596 * Fetches the next opcode dword and zero extends it to a quad word, returns
1597 * automatically on failure.
1598 *
1599 * @param a_pu64 Where to return the opcode quad word.
1600 * @remark Implicitly references pIemCpu.
1601 */
1602#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1603 do \
1604 { \
1605 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1606 if (rcStrict2 != VINF_SUCCESS) \
1607 return rcStrict2; \
1608 } while (0)
1609
1610
1611/**
1612 * Fetches the next signed double word from the opcode stream.
1613 *
1614 * @returns Strict VBox status code.
1615 * @param pIemCpu The IEM state.
1616 * @param pi32 Where to return the signed double word.
1617 */
1618DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1619{
1620 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1621}
1622
1623/**
1624 * Fetches the next signed double word from the opcode stream, returning
1625 * automatically on failure.
1626 *
1627 * @param a_pi32 Where to return the signed double word.
1628 * @remark Implicitly references pIemCpu.
1629 */
1630#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1631 do \
1632 { \
1633 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1634 if (rcStrict2 != VINF_SUCCESS) \
1635 return rcStrict2; \
1636 } while (0)
1637
1638
1639/**
1640 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1641 *
1642 * @returns Strict VBox status code.
1643 * @param pIemCpu The IEM state.
1644 * @param pu64 Where to return the opcode qword.
1645 */
1646DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1647{
1648 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1649 if (rcStrict == VINF_SUCCESS)
1650 {
1651 uint8_t offOpcode = pIemCpu->offOpcode;
1652 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1653 pIemCpu->abOpcode[offOpcode + 1],
1654 pIemCpu->abOpcode[offOpcode + 2],
1655 pIemCpu->abOpcode[offOpcode + 3]);
1656 pIemCpu->offOpcode = offOpcode + 4;
1657 }
1658 else
1659 *pu64 = 0;
1660 return rcStrict;
1661}
1662
1663
1664/**
1665 * Fetches the next opcode dword, sign extending it into a quad word.
1666 *
1667 * @returns Strict VBox status code.
1668 * @param pIemCpu The IEM state.
1669 * @param pu64 Where to return the opcode quad word.
1670 */
1671DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1672{
1673 uint8_t const offOpcode = pIemCpu->offOpcode;
1674 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1675 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1676
1677 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1678 pIemCpu->abOpcode[offOpcode + 1],
1679 pIemCpu->abOpcode[offOpcode + 2],
1680 pIemCpu->abOpcode[offOpcode + 3]);
1681 *pu64 = i32;
1682 pIemCpu->offOpcode = offOpcode + 4;
1683 return VINF_SUCCESS;
1684}
1685
1686
1687/**
1688 * Fetches the next opcode double word and sign extends it to a quad word,
1689 * returns automatically on failure.
1690 *
1691 * @param a_pu64 Where to return the opcode quad word.
1692 * @remark Implicitly references pIemCpu.
1693 */
1694#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1695 do \
1696 { \
1697 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1698 if (rcStrict2 != VINF_SUCCESS) \
1699 return rcStrict2; \
1700 } while (0)
1701
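/* Note: the sign extension matters for the many 64-bit encodings that only carry a 32-bit
 * immediate, e.g. ADD rax, imm32 (REX.W 05 id) must see 0xffffffff as -1, i.e.
 * 0xffffffffffffffff, which is exactly what this helper hands back. */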
1702
1703/**
1704 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1705 *
1706 * @returns Strict VBox status code.
1707 * @param pIemCpu The IEM state.
1708 * @param pu64 Where to return the opcode qword.
1709 */
1710DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1711{
1712 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1713 if (rcStrict == VINF_SUCCESS)
1714 {
1715 uint8_t offOpcode = pIemCpu->offOpcode;
1716 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1717 pIemCpu->abOpcode[offOpcode + 1],
1718 pIemCpu->abOpcode[offOpcode + 2],
1719 pIemCpu->abOpcode[offOpcode + 3],
1720 pIemCpu->abOpcode[offOpcode + 4],
1721 pIemCpu->abOpcode[offOpcode + 5],
1722 pIemCpu->abOpcode[offOpcode + 6],
1723 pIemCpu->abOpcode[offOpcode + 7]);
1724 pIemCpu->offOpcode = offOpcode + 8;
1725 }
1726 else
1727 *pu64 = 0;
1728 return rcStrict;
1729}
1730
1731
1732/**
1733 * Fetches the next opcode qword.
1734 *
1735 * @returns Strict VBox status code.
1736 * @param pIemCpu The IEM state.
1737 * @param pu64 Where to return the opcode qword.
1738 */
1739DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1740{
1741 uint8_t const offOpcode = pIemCpu->offOpcode;
1742 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1743 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1744
1745 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1746 pIemCpu->abOpcode[offOpcode + 1],
1747 pIemCpu->abOpcode[offOpcode + 2],
1748 pIemCpu->abOpcode[offOpcode + 3],
1749 pIemCpu->abOpcode[offOpcode + 4],
1750 pIemCpu->abOpcode[offOpcode + 5],
1751 pIemCpu->abOpcode[offOpcode + 6],
1752 pIemCpu->abOpcode[offOpcode + 7]);
1753 pIemCpu->offOpcode = offOpcode + 8;
1754 return VINF_SUCCESS;
1755}
1756
1757
1758/**
1759 * Fetches the next opcode quad word, returns automatically on failure.
1760 *
1761 * @param a_pu64 Where to return the opcode quad word.
1762 * @remark Implicitly references pIemCpu.
1763 */
1764#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1765 do \
1766 { \
1767 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1768 if (rcStrict2 != VINF_SUCCESS) \
1769 return rcStrict2; \
1770 } while (0)
1771
1772
1773/** @name Misc Worker Functions.
1774 * @{
1775 */
1776
1777
1778/**
1779 * Validates a new SS segment.
1780 *
1781 * @returns VBox strict status code.
1782 * @param pIemCpu The IEM per CPU instance data.
1783 * @param pCtx The CPU context.
1784 * @param NewSS The new SS selector.
1785 * @param uCpl The CPL to load the stack for.
1786 * @param pDesc Where to return the descriptor.
1787 */
1788static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1789{
1790 NOREF(pCtx);
1791
1792 /* Null selectors are not allowed (we're not called for dispatching
1793 interrupts with SS=0 in long mode). */
1794 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1795 {
1796 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1797 return iemRaiseGeneralProtectionFault0(pIemCpu);
1798 }
1799
1800 /*
1801 * Read the descriptor.
1802 */
1803 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1804 if (rcStrict != VINF_SUCCESS)
1805 return rcStrict;
1806
1807 /*
1808 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1809 */
1810 if (!pDesc->Legacy.Gen.u1DescType)
1811 {
1812 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1813 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1814 }
1815
1816 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1817 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1818 {
1819 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1820 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1821 }
1822 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1823 if ((NewSS & X86_SEL_RPL) != uCpl)
1824 {
1825 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1826 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1827 }
1828 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1829 {
1830 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1831 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1832 }
1833
1834 /* Is it there? */
1835 /** @todo testcase: Is this checked before the canonical / limit check below? */
1836 if (!pDesc->Legacy.Gen.u1Present)
1837 {
1838 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1839 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1840 }
1841
1842 return VINF_SUCCESS;
1843}
1844
1845
1846/**
1847 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1848 * not.
1849 *
1850 * @param a_pIemCpu The IEM per CPU data.
1851 * @param a_pCtx The CPU context.
1852 */
1853#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1854# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1855 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1856 ? (a_pCtx)->eflags.u \
1857 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1858#else
1859# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1860 ( (a_pCtx)->eflags.u )
1861#endif
1862
1863/**
1864 * Updates the EFLAGS in the correct manner wrt. PATM.
1865 *
1866 * @param a_pIemCpu The IEM per CPU data.
1867 * @param a_pCtx The CPU context.
1868 */
1869#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1870# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1871 do { \
1872 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1873 (a_pCtx)->eflags.u = (a_fEfl); \
1874 else \
1875 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1876 } while (0)
1877#else
1878# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1879 do { \
1880 (a_pCtx)->eflags.u = (a_fEfl); \
1881 } while (0)
1882#endif
1883
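/* Typical use (the real mode interrupt dispatcher below does exactly this): read the flags
 * through the getter, modify them, then write them back through the setter so any
 * PATM-managed bits are handled for us:
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */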
1884
1885/** @} */
1886
1887/** @name Raising Exceptions.
1888 *
1889 * @{
1890 */
1891
1892/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1893 * @{ */
1894/** CPU exception. */
1895#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1896/** External interrupt (from PIC, APIC, whatever). */
1897#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1898/** Software interrupt (int or into, not bound).
1899 * Returns to the following instruction. */
1900#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1901/** Takes an error code. */
1902#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1903/** Takes a CR2. */
1904#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1905/** Generated by the breakpoint instruction. */
1906#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1907/** @} */
1908
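/* Example of how the flags combine (see iemRaisePageFault further down): a guest page fault
 * is raised with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, which
 * tells iemRaiseXcptOrInt that both the uErr and uCr2 arguments carry meaningful values. */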
1909
1910/**
1911 * Loads the specified stack far pointer from the TSS.
1912 *
1913 * @returns VBox strict status code.
1914 * @param pIemCpu The IEM per CPU instance data.
1915 * @param pCtx The CPU context.
1916 * @param uCpl The CPL to load the stack for.
1917 * @param pSelSS Where to return the new stack segment.
1918 * @param puEsp Where to return the new stack pointer.
1919 */
1920static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1921 PRTSEL pSelSS, uint32_t *puEsp)
1922{
1923 VBOXSTRICTRC rcStrict;
1924 Assert(uCpl < 4);
1925 *puEsp = 0; /* make gcc happy */
1926 *pSelSS = 0; /* make gcc happy */
1927
1928 switch (pCtx->tr.Attr.n.u4Type)
1929 {
1930 /*
1931 * 16-bit TSS (X86TSS16).
1932 */
1933 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1934 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1935 {
1936 uint32_t off = uCpl * 4 + 2;
1937 if (off + 4 > pCtx->tr.u32Limit)
1938 {
1939 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1940 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1941 }
1942
1943 uint32_t u32Tmp = 0; /* gcc maybe... */
1944 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1945 if (rcStrict == VINF_SUCCESS)
1946 {
1947 *puEsp = RT_LOWORD(u32Tmp);
1948 *pSelSS = RT_HIWORD(u32Tmp);
1949 return VINF_SUCCESS;
1950 }
1951 break;
1952 }
1953
1954 /*
1955 * 32-bit TSS (X86TSS32).
1956 */
1957 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1958 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1959 {
1960 uint32_t off = uCpl * 8 + 4;
1961 if (off + 7 > pCtx->tr.u32Limit)
1962 {
1963 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1964 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1965 }
1966
1967 uint64_t u64Tmp;
1968 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1969 if (rcStrict == VINF_SUCCESS)
1970 {
1971 *puEsp = u64Tmp & UINT32_MAX;
1972 *pSelSS = (RTSEL)(u64Tmp >> 32);
1973 return VINF_SUCCESS;
1974 }
1975 break;
1976 }
1977
1978 default:
1979 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1980 }
1981 return rcStrict;
1982}
1983
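/* Worked example for the offset calculations above (assuming the standard TSS layouts):
 * a 16-bit TSS stores SP:SS word pairs starting at offset 2, so uCpl=1 reads offset
 * 2 + 1*4 = 6; a 32-bit TSS stores an ESP dword plus an SS word (padded to 8 bytes)
 * starting at offset 4, so uCpl=1 reads offset 4 + 1*8 = 12. */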
1984
1985/**
1986 * Loads the specified stack pointer from the 64-bit TSS.
1987 *
1988 * @returns VBox strict status code.
1989 * @param pIemCpu The IEM per CPU instance data.
1990 * @param pCtx The CPU context.
1991 * @param uCpl The CPL to load the stack for.
1992 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1993 * @param puRsp Where to return the new stack pointer.
1994 */
1995static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
1996 uint64_t *puRsp)
1997{
1998 Assert(uCpl < 4);
1999 Assert(uIst < 8);
2000 *puRsp = 0; /* make gcc happy */
2001
2002 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2003
2004 uint32_t off;
2005 if (uIst)
2006 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2007 else
2008 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2009 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2010 {
2011 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2012 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2013 }
2014
2015 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2016}
2017
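/* Example: with uIst=3 the code above reads the third IST slot, i.e. the qword at
 * RT_OFFSETOF(X86TSS64, ist1) + 2 * sizeof(uint64_t); with uIst=0 it instead uses the
 * rsp0/rsp1/rsp2 slot selected by uCpl. */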
2018
2019/**
2020 * Adjust the CPU state according to the exception being raised.
2021 *
2022 * @param pCtx The CPU context.
2023 * @param u8Vector The exception that has been raised.
2024 */
2025DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2026{
2027 switch (u8Vector)
2028 {
2029 case X86_XCPT_DB:
2030 pCtx->dr[7] &= ~X86_DR7_GD;
2031 break;
2032 /** @todo Read the AMD and Intel exception reference... */
2033 }
2034}
2035
2036
2037/**
2038 * Implements exceptions and interrupts for real mode.
2039 *
2040 * @returns VBox strict status code.
2041 * @param pIemCpu The IEM per CPU instance data.
2042 * @param pCtx The CPU context.
2043 * @param cbInstr The number of bytes to offset rIP by in the return
2044 * address.
2045 * @param u8Vector The interrupt / exception vector number.
2046 * @param fFlags The flags.
2047 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2048 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2049 */
2050static VBOXSTRICTRC
2051iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2052 PCPUMCTX pCtx,
2053 uint8_t cbInstr,
2054 uint8_t u8Vector,
2055 uint32_t fFlags,
2056 uint16_t uErr,
2057 uint64_t uCr2)
2058{
2059 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2060 NOREF(uErr); NOREF(uCr2);
2061
2062 /*
2063 * Read the IDT entry.
2064 */
2065 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2066 {
2067 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2068 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2069 }
2070 RTFAR16 Idte;
2071 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2072 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2073 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2074 return rcStrict;
2075
2076 /*
2077 * Push the stack frame.
2078 */
2079 uint16_t *pu16Frame;
2080 uint64_t uNewRsp;
2081 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2086 pu16Frame[2] = (uint16_t)fEfl;
2087 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2088 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2089 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2091 return rcStrict;
2092
2093 /*
2094 * Load the vector address into cs:ip and make exception specific state
2095 * adjustments.
2096 */
2097 pCtx->cs.Sel = Idte.sel;
2098 pCtx->cs.ValidSel = Idte.sel;
2099 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2100 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2101 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2102 pCtx->rip = Idte.off;
2103 fEfl &= ~X86_EFL_IF;
2104 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2105
2106 /** @todo do we actually do this in real mode? */
2107 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2108 iemRaiseXcptAdjustState(pCtx, u8Vector);
2109
2110 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2111}
2112
2113
2114/**
2115 * Implements exceptions and interrupts for protected mode.
2116 *
2117 * @returns VBox strict status code.
2118 * @param pIemCpu The IEM per CPU instance data.
2119 * @param pCtx The CPU context.
2120 * @param cbInstr The number of bytes to offset rIP by in the return
2121 * address.
2122 * @param u8Vector The interrupt / exception vector number.
2123 * @param fFlags The flags.
2124 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2125 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2126 */
2127static VBOXSTRICTRC
2128iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2129 PCPUMCTX pCtx,
2130 uint8_t cbInstr,
2131 uint8_t u8Vector,
2132 uint32_t fFlags,
2133 uint16_t uErr,
2134 uint64_t uCr2)
2135{
2136 NOREF(cbInstr);
2137
2138 /*
2139 * Read the IDT entry.
2140 */
2141 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2142 {
2143 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2144 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2145 }
2146 X86DESC Idte;
2147 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2148 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2149 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2150 return rcStrict;
2151 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2152 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2153 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2154
2155 /*
2156 * Check the descriptor type, DPL and such.
2157 * ASSUMES this is done in the same order as described for call-gate calls.
2158 */
2159 if (Idte.Gate.u1DescType)
2160 {
2161 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2162 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2163 }
2164 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2165 switch (Idte.Gate.u4Type)
2166 {
2167 case X86_SEL_TYPE_SYS_UNDEFINED:
2168 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2169 case X86_SEL_TYPE_SYS_LDT:
2170 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2171 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2172 case X86_SEL_TYPE_SYS_UNDEFINED2:
2173 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2174 case X86_SEL_TYPE_SYS_UNDEFINED3:
2175 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2176 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2177 case X86_SEL_TYPE_SYS_UNDEFINED4:
2178 {
2179 /** @todo check what actually happens when the type is wrong...
2180 * esp. call gates. */
2181 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2182 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2183 }
2184
2185 case X86_SEL_TYPE_SYS_286_INT_GATE:
2186 case X86_SEL_TYPE_SYS_386_INT_GATE:
2187 fEflToClear |= X86_EFL_IF;
2188 break;
2189
2190 case X86_SEL_TYPE_SYS_TASK_GATE:
2191 /** @todo task gates. */
2192 AssertFailedReturn(VERR_NOT_SUPPORTED);
2193
2194 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2195 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2196 break;
2197
2198 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2199 }
2200
2201 /* Check DPL against CPL if applicable. */
2202 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2203 {
2204 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2205 {
2206 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2207 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2208 }
2209 }
2210
2211 /* Is it there? */
2212 if (!Idte.Gate.u1Present)
2213 {
2214 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2215 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2216 }
2217
2218 /* A null CS is bad. */
2219 RTSEL NewCS = Idte.Gate.u16Sel;
2220 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2221 {
2222 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2223 return iemRaiseGeneralProtectionFault0(pIemCpu);
2224 }
2225
2226 /* Fetch the descriptor for the new CS. */
2227 IEMSELDESC DescCS;
2228 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2232 return rcStrict;
2233 }
2234
2235 /* Must be a code segment. */
2236 if (!DescCS.Legacy.Gen.u1DescType)
2237 {
2238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2239 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2240 }
2241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2242 {
2243 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2244 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2245 }
2246
2247 /* Don't allow lowering the privilege level. */
2248 /** @todo Does the lowering of privileges apply to software interrupts
2249 * only? This has a bearing on the more-privileged or
2250 * same-privilege stack behavior further down. A testcase would
2251 * be nice. */
2252 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2253 {
2254 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2255 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2256 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2257 }
2258
2259 /* Make sure the selector is present. */
2260 if (!DescCS.Legacy.Gen.u1Present)
2261 {
2262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2263 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2264 }
2265
2266 /* Check the new EIP against the new CS limit. */
2267 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2268 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2269 ? Idte.Gate.u16OffsetLow
2270 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2271 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2272 if (uNewEip > cbLimitCS)
2273 {
2274 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
2275 u8Vector, uNewEip, cbLimitCS, NewCS));
2276 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2277 }
2278
2279 /*
2280 * If the privilege level changes, we need to get a new stack from the TSS.
2281 * This in turns means validating the new SS and ESP...
2282 */
2283 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2284 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2285 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2286 if (uNewCpl != pIemCpu->uCpl)
2287 {
2288 RTSEL NewSS;
2289 uint32_t uNewEsp;
2290 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2291 if (rcStrict != VINF_SUCCESS)
2292 return rcStrict;
2293
2294 IEMSELDESC DescSS;
2295 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2296 if (rcStrict != VINF_SUCCESS)
2297 return rcStrict;
2298
2299 /* Check that there is sufficient space for the stack frame. */
2300 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2301 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2302 {
2303 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2304 }
2305
2306 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
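/* 20 bytes = EIP, CS, EFLAGS, ESP and SS pushed as dwords; 24 when an error code goes first. */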
2307 if ( uNewEsp - 1 > cbLimitSS
2308 || uNewEsp < cbStackFrame)
2309 {
2310 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2311 u8Vector, NewSS, uNewEsp, cbStackFrame));
2312 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2313 }
2314
2315 /*
2316 * Start making changes.
2317 */
2318
2319 /* Create the stack frame. */
2320 RTPTRUNION uStackFrame;
2321 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2322 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2323 if (rcStrict != VINF_SUCCESS)
2324 return rcStrict;
2325 void * const pvStackFrame = uStackFrame.pv;
2326
2327 if (fFlags & IEM_XCPT_FLAGS_ERR)
2328 *uStackFrame.pu32++ = uErr;
2329 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
2330 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2331 uStackFrame.pu32[2] = fEfl;
2332 uStackFrame.pu32[3] = pCtx->esp;
2333 uStackFrame.pu32[4] = pCtx->ss.Sel;
2334 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2335 if (rcStrict != VINF_SUCCESS)
2336 return rcStrict;
2337
2338 /* Mark the selectors 'accessed' (hope this is the correct time). */
2339 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2340 * after pushing the stack frame? (Write protect the gdt + stack to
2341 * find out.) */
2342 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2343 {
2344 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2345 if (rcStrict != VINF_SUCCESS)
2346 return rcStrict;
2347 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2348 }
2349
2350 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2351 {
2352 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2353 if (rcStrict != VINF_SUCCESS)
2354 return rcStrict;
2355 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2356 }
2357
2358 /*
2359 * Start committing the register changes (joins with the DPL=CPL branch).
2360 */
2361 pCtx->ss.Sel = NewSS;
2362 pCtx->ss.ValidSel = NewSS;
2363 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2364 pCtx->ss.u32Limit = cbLimitSS;
2365 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2366 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2367 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2368 pIemCpu->uCpl = uNewCpl;
2369 }
2370 /*
2371 * Same privilege, no stack change and smaller stack frame.
2372 */
2373 else
2374 {
2375 uint64_t uNewRsp;
2376 RTPTRUNION uStackFrame;
2377 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2378 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2379 if (rcStrict != VINF_SUCCESS)
2380 return rcStrict;
2381 void * const pvStackFrame = uStackFrame.pv;
2382
2383 if (fFlags & IEM_XCPT_FLAGS_ERR)
2384 *uStackFrame.pu32++ = uErr;
2385 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2386 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2387 uStackFrame.pu32[2] = fEfl;
2388 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2389 if (rcStrict != VINF_SUCCESS)
2390 return rcStrict;
2391
2392 /* Mark the CS selector as 'accessed'. */
2393 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2394 {
2395 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2396 if (rcStrict != VINF_SUCCESS)
2397 return rcStrict;
2398 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2399 }
2400
2401 /*
2402 * Start committing the register changes (joins with the other branch).
2403 */
2404 pCtx->rsp = uNewRsp;
2405 }
2406
2407 /* ... register committing continues. */
2408 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2409 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2410 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2411 pCtx->cs.u32Limit = cbLimitCS;
2412 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2413 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2414
2415 pCtx->rip = uNewEip;
2416 fEfl &= ~fEflToClear;
2417 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2418
2419 if (fFlags & IEM_XCPT_FLAGS_CR2)
2420 pCtx->cr2 = uCr2;
2421
2422 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2423 iemRaiseXcptAdjustState(pCtx, u8Vector);
2424
2425 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2426}
2427
2428
2429/**
2430 * Implements exceptions and interrupts for V8086 mode.
2431 *
2432 * @returns VBox strict status code.
2433 * @param pIemCpu The IEM per CPU instance data.
2434 * @param pCtx The CPU context.
2435 * @param cbInstr The number of bytes to offset rIP by in the return
2436 * address.
2437 * @param u8Vector The interrupt / exception vector number.
2438 * @param fFlags The flags.
2439 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2440 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2441 */
2442static VBOXSTRICTRC
2443iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2444 PCPUMCTX pCtx,
2445 uint8_t cbInstr,
2446 uint8_t u8Vector,
2447 uint32_t fFlags,
2448 uint16_t uErr,
2449 uint64_t uCr2)
2450{
2451 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2452 /** @todo implement me. */
2453 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2454}
2455
2456
2457/**
2458 * Implements exceptions and interrupts for long mode.
2459 *
2460 * @returns VBox strict status code.
2461 * @param pIemCpu The IEM per CPU instance data.
2462 * @param pCtx The CPU context.
2463 * @param cbInstr The number of bytes to offset rIP by in the return
2464 * address.
2465 * @param u8Vector The interrupt / exception vector number.
2466 * @param fFlags The flags.
2467 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2468 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2469 */
2470static VBOXSTRICTRC
2471iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2472 PCPUMCTX pCtx,
2473 uint8_t cbInstr,
2474 uint8_t u8Vector,
2475 uint32_t fFlags,
2476 uint16_t uErr,
2477 uint64_t uCr2)
2478{
2479 NOREF(cbInstr);
2480
2481 /*
2482 * Read the IDT entry.
2483 */
2484 uint16_t offIdt = (uint16_t)u8Vector << 4;
2485 if (pCtx->idtr.cbIdt < offIdt + 7)
2486 {
2487 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2488 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2489 }
2490 X86DESC64 Idte;
2491 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
2492 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2493 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
2494 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2495 return rcStrict;
2496 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2497 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2498 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2499
2500 /*
2501 * Check the descriptor type, DPL and such.
2502 * ASSUMES this is done in the same order as described for call-gate calls.
2503 */
2504 if (Idte.Gate.u1DescType)
2505 {
2506 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2507 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2508 }
2509 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2510 switch (Idte.Gate.u4Type)
2511 {
2512 case AMD64_SEL_TYPE_SYS_INT_GATE:
2513 fEflToClear |= X86_EFL_IF;
2514 break;
2515 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2516 break;
2517
2518 default:
2519 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2520 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2521 }
2522
2523 /* Check DPL against CPL if applicable. */
2524 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2525 {
2526 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2527 {
2528 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2529 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2530 }
2531 }
2532
2533 /* Is it there? */
2534 if (!Idte.Gate.u1Present)
2535 {
2536 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2537 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2538 }
2539
2540 /* A null CS is bad. */
2541 RTSEL NewCS = Idte.Gate.u16Sel;
2542 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2543 {
2544 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2545 return iemRaiseGeneralProtectionFault0(pIemCpu);
2546 }
2547
2548 /* Fetch the descriptor for the new CS. */
2549 IEMSELDESC DescCS;
2550 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2551 if (rcStrict != VINF_SUCCESS)
2552 {
2553 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2554 return rcStrict;
2555 }
2556
2557 /* Must be a 64-bit code segment. */
2558 if (!DescCS.Long.Gen.u1DescType)
2559 {
2560 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2561 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2562 }
2563 if ( !DescCS.Long.Gen.u1Long
2564 || DescCS.Long.Gen.u1DefBig
2565 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2566 {
2567 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2568 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2569 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2570 }
2571
2572 /* Don't allow lowering the privilege level. For non-conforming CS
2573 selectors, the CS.DPL sets the privilege level the trap/interrupt
2574 handler runs at. For conforming CS selectors, the CPL remains
2575 unchanged, but the CS.DPL must be <= CPL. */
2576 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2577 * when CPU in Ring-0. Result \#GP? */
2578 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2579 {
2580 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2581 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2582 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2583 }
2584
2585
2586 /* Make sure the selector is present. */
2587 if (!DescCS.Legacy.Gen.u1Present)
2588 {
2589 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2590 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2591 }
2592
2593 /* Check that the new RIP is canonical. */
2594 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2595 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2596 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2597 if (!IEM_IS_CANONICAL(uNewRip))
2598 {
2599 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2600 return iemRaiseGeneralProtectionFault0(pIemCpu);
2601 }
2602
2603 /*
2604 * If the privilege level changes or if the IST isn't zero, we need to get
2605 * a new stack from the TSS.
2606 */
2607 uint64_t uNewRsp;
2608 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2609 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2610 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2611 if ( uNewCpl != pIemCpu->uCpl
2612 || Idte.Gate.u3IST != 0)
2613 {
2614 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2615 if (rcStrict != VINF_SUCCESS)
2616 return rcStrict;
2617 }
2618 else
2619 uNewRsp = pCtx->rsp;
2620 uNewRsp &= ~(uint64_t)0xf;
2621
2622 /*
2623 * Start making changes.
2624 */
2625
2626 /* Create the stack frame. */
2627 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
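/* 5 qwords = RIP, CS, RFLAGS, RSP and SS; 6 when an error code is pushed first. */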
2628 RTPTRUNION uStackFrame;
2629 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2630 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2631 if (rcStrict != VINF_SUCCESS)
2632 return rcStrict;
2633 void * const pvStackFrame = uStackFrame.pv;
2634
2635 if (fFlags & IEM_XCPT_FLAGS_ERR)
2636 *uStackFrame.pu64++ = uErr;
2637 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
2638 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
2639 uStackFrame.pu64[2] = fEfl;
2640 uStackFrame.pu64[3] = pCtx->rsp;
2641 uStackFrame.pu64[4] = pCtx->ss.Sel;
2642 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2643 if (rcStrict != VINF_SUCCESS)
2644 return rcStrict;
2645
2646 /* Mark the CS selector 'accessed' (hope this is the correct time). */
2647 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2648 * after pushing the stack frame? (Write protect the gdt + stack to
2649 * find out.) */
2650 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2651 {
2652 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2653 if (rcStrict != VINF_SUCCESS)
2654 return rcStrict;
2655 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2656 }
2657
2658 /*
2659 * Start committing the register changes.
2660 */
2661 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2662 * hidden registers when interrupting 32-bit or 16-bit code! */
2663 if (uNewCpl != pIemCpu->uCpl)
2664 {
2665 pCtx->ss.Sel = 0 | uNewCpl;
2666 pCtx->ss.ValidSel = 0 | uNewCpl;
2667 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2668 pCtx->ss.u32Limit = UINT32_MAX;
2669 pCtx->ss.u64Base = 0;
2670 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2671 }
2672 pCtx->rsp = uNewRsp - cbStackFrame;
2673 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2674 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2675 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2676 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2677 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2678 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2679 pCtx->rip = uNewRip;
2680 pIemCpu->uCpl = uNewCpl;
2681
2682 fEfl &= ~fEflToClear;
2683 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2684
2685 if (fFlags & IEM_XCPT_FLAGS_CR2)
2686 pCtx->cr2 = uCr2;
2687
2688 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2689 iemRaiseXcptAdjustState(pCtx, u8Vector);
2690
2691 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2692}
2693
2694
2695/**
2696 * Implements exceptions and interrupts.
2697 *
2698 * All exceptions and interrupts go through this function!
2699 *
2700 * @returns VBox strict status code.
2701 * @param pIemCpu The IEM per CPU instance data.
2702 * @param cbInstr The number of bytes to offset rIP by in the return
2703 * address.
2704 * @param u8Vector The interrupt / exception vector number.
2705 * @param fFlags The flags.
2706 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2707 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2708 */
2709DECL_NO_INLINE(static, VBOXSTRICTRC)
2710iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2711 uint8_t cbInstr,
2712 uint8_t u8Vector,
2713 uint32_t fFlags,
2714 uint16_t uErr,
2715 uint64_t uCr2)
2716{
2717 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2718
2719 /*
2720 * Do recursion accounting.
2721 */
2722 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2723 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2724 if (pIemCpu->cXcptRecursions == 0)
2725 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2726 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2727 else
2728 {
2729 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2730 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2731
2732 /** @todo double and triple faults. */
2733 if (pIemCpu->cXcptRecursions >= 3)
2734 {
2735#ifdef DEBUG_bird
2736 AssertFailed();
2737#endif
2738 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2739 }
2740
2741 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2742 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2743 {
2744 ....
2745 } */
2746 }
2747 pIemCpu->cXcptRecursions++;
2748 pIemCpu->uCurXcpt = u8Vector;
2749 pIemCpu->fCurXcpt = fFlags;
2750
2751 /*
2752 * Extensive logging.
2753 */
2754#if defined(LOG_ENABLED) && defined(IN_RING3)
2755 if (LogIs3Enabled())
2756 {
2757 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2758 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2759 char szRegs[4096];
2760 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2761 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2762 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2763 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2764 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2765 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2766 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2767 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2768 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2769 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2770 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2771 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2772 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2773 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2774 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2775 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2776 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2777 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2778 " efer=%016VR{efer}\n"
2779 " pat=%016VR{pat}\n"
2780 " sf_mask=%016VR{sf_mask}\n"
2781 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2782 " lstar=%016VR{lstar}\n"
2783 " star=%016VR{star} cstar=%016VR{cstar}\n"
2784 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2785 );
2786
2787 char szInstr[256];
2788 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2789 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2790 szInstr, sizeof(szInstr), NULL);
2791 Log3(("%s%s\n", szRegs, szInstr));
2792 }
2793#endif /* LOG_ENABLED */
2794
2795 /*
2796 * Call the mode specific worker function.
2797 */
2798 VBOXSTRICTRC rcStrict;
2799 if (!(pCtx->cr0 & X86_CR0_PE))
2800 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2801 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2802 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2803 else if (!pCtx->eflags.Bits.u1VM)
2804 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2805 else
2806 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2807
2808 /*
2809 * Unwind.
2810 */
2811 pIemCpu->cXcptRecursions--;
2812 pIemCpu->uCurXcpt = uPrevXcpt;
2813 pIemCpu->fCurXcpt = fPrevXcpt;
2814 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2815 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2816 return rcStrict;
2817}
2818
2819
2820/** \#DE - 00. */
2821DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2822{
2823 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2824}
2825
2826
2827/** \#DB - 01. */
2828DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2829{
2830 /** @todo set/clear RF. */
2831 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2832}
2833
2834
2835/** \#UD - 06. */
2836DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2837{
2838 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2839}
2840
2841
2842/** \#NM - 07. */
2843DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2844{
2845 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2846}
2847
2848
2849#ifdef SOME_UNUSED_FUNCTION
2850/** \#TS(err) - 0a. */
2851DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2852{
2853 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2854}
2855#endif
2856
2857
2858/** \#TS(tr) - 0a. */
2859DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2860{
2861 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2862 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2863}
2864
2865
2866/** \#NP(err) - 0b. */
2867DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2868{
2869 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2870}
2871
2872
2873/** \#NP(seg) - 0b. */
2874DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2875{
2876 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2877 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2878}
2879
2880
2881/** \#NP(sel) - 0b. */
2882DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2883{
2884 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2885 uSel & ~X86_SEL_RPL, 0);
2886}
2887
2888
2889/** \#SS(seg) - 0c. */
2890DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2891{
2892 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2893 uSel & ~X86_SEL_RPL, 0);
2894}
2895
2896
2897/** \#GP(n) - 0d. */
2898DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2899{
2900 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2901}
2902
2903
2904/** \#GP(0) - 0d. */
2905DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2906{
2907 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2908}
2909
2910
2911/** \#GP(sel) - 0d. */
2912DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2913{
2914 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2915 Sel & ~X86_SEL_RPL, 0);
2916}
2917
2918
2919/** \#GP(0) - 0d. */
2920DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2921{
2922 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2923}
2924
2925
2926/** \#GP(sel) - 0d. */
2927DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2928{
2929 NOREF(iSegReg); NOREF(fAccess);
2930 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2931 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2932}
2933
2934
2935/** \#GP(sel) - 0d. */
2936DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2937{
2938 NOREF(Sel);
2939 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2940}
2941
2942
2943/** \#GP(sel) - 0d. */
2944DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2945{
2946 NOREF(iSegReg); NOREF(fAccess);
2947 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2948}
2949
2950
2951/** \#PF(n) - 0e. */
2952DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2953{
2954 uint16_t uErr;
2955 switch (rc)
2956 {
2957 case VERR_PAGE_NOT_PRESENT:
2958 case VERR_PAGE_TABLE_NOT_PRESENT:
2959 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2960 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2961 uErr = 0;
2962 break;
2963
2964 default:
2965 AssertMsgFailed(("%Rrc\n", rc));
2966 case VERR_ACCESS_DENIED:
2967 uErr = X86_TRAP_PF_P;
2968 break;
2969
2970 /** @todo reserved */
2971 }
2972
2973 if (pIemCpu->uCpl == 3)
2974 uErr |= X86_TRAP_PF_US;
2975
2976 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2977 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2978 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2979 uErr |= X86_TRAP_PF_ID;
2980
2981 /* Note! RW access callers reporting a WRITE protection fault will clear
2982 the READ flag before calling. So, read-modify-write accesses (RW)
2983 can safely be reported as READ faults. */
2984 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2985 uErr |= X86_TRAP_PF_RW;
2986
2987 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2988 uErr, GCPtrWhere);
2989}
2990
2991
2992/** \#MF(0) - 10. */
2993DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2994{
2995 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2996}
2997
2998
2999/** \#AC(0) - 11. */
3000DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
3001{
3002 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3003}
3004
3005
3006/**
3007 * Macro for calling iemCImplRaiseDivideError().
3008 *
3009 * This enables us to add/remove arguments and force different levels of
3010 * inlining as we wish.
3011 *
3012 * @return Strict VBox status code.
3013 */
3014#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
3015IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
3016{
3017 NOREF(cbInstr);
3018 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3019}
3020
3021
3022/**
3023 * Macro for calling iemCImplRaiseInvalidLockPrefix().
3024 *
3025 * This enables us to add/remove arguments and force different levels of
3026 * inlining as we wish.
3027 *
3028 * @return Strict VBox status code.
3029 */
3030#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
3031IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3032{
3033 NOREF(cbInstr);
3034 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3035}
3036
3037
3038/**
3039 * Macro for calling iemCImplRaiseInvalidOpcode().
3040 *
3041 * This enables us to add/remove arguments and force different levels of
3042 * inlining as we wish.
3043 *
3044 * @return Strict VBox status code.
3045 */
3046#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
3047IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3048{
3049 NOREF(cbInstr);
3050 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3051}
3052
3053
3054/** @} */
3055
3056
3057/*
3058 *
3059 * Helper routines.
3060 * Helper routines.
3061 * Helper routines.
3062 *
3063 */
3064
3065/**
3066 * Recalculates the effective operand size.
3067 *
3068 * @param pIemCpu The IEM state.
3069 */
3070static void iemRecalEffOpSize(PIEMCPU pIemCpu)
3071{
3072 switch (pIemCpu->enmCpuMode)
3073 {
3074 case IEMMODE_16BIT:
3075 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
3076 break;
3077 case IEMMODE_32BIT:
3078 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
3079 break;
3080 case IEMMODE_64BIT:
3081 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
3082 {
3083 case 0:
3084 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
3085 break;
3086 case IEM_OP_PRF_SIZE_OP:
3087 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3088 break;
3089 case IEM_OP_PRF_SIZE_REX_W:
3090 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
3091 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3092 break;
3093 }
3094 break;
3095 default:
3096 AssertFailed();
3097 }
3098}
3099
3100
3101/**
3102 * Sets the default operand size to 64-bit and recalculates the effective
3103 * operand size.
3104 *
3105 * @param pIemCpu The IEM state.
3106 */
3107static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
3108{
3109 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3110 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
3111 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
3112 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3113 else
3114 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3115}
3116
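/* Note: this caters for the instructions whose operand size defaults to 64 bits in long mode
 * (near branches, push/pop and the like): a REX.W prefix is redundant for them and only the
 * operand size prefix (66h, without REX.W) can still select 16-bit operands, which is what
 * the check above implements. */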
3117
3118/*
3119 *
3120 * Common opcode decoders.
3121 * Common opcode decoders.
3122 * Common opcode decoders.
3123 *
3124 */
3125//#include <iprt/mem.h>
3126
3127/**
3128 * Used to add extra details about a stub case.
3129 * @param pIemCpu The IEM per CPU state.
3130 */
3131static void iemOpStubMsg2(PIEMCPU pIemCpu)
3132{
3133#if defined(LOG_ENABLED) && defined(IN_RING3)
3134 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3135 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3136 char szRegs[4096];
3137 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3138 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3139 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3140 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3141 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3142 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3143 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3144 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3145 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3146 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3147 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3148 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3149 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3150 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3151 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3152 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3153 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3154 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3155 " efer=%016VR{efer}\n"
3156 " pat=%016VR{pat}\n"
3157 " sf_mask=%016VR{sf_mask}\n"
3158 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3159 " lstar=%016VR{lstar}\n"
3160 " star=%016VR{star} cstar=%016VR{cstar}\n"
3161 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3162 );
3163
3164 char szInstr[256];
3165 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3166 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3167 szInstr, sizeof(szInstr), NULL);
3168
3169 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
3170#else
3171 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
3172#endif
3173}
3174
3175/**
3176 * Complains about a stub.
3177 *
3178 * Providing two versions of this macro, one for daily use and one for use when
3179 * working on IEM.
3180 */
3181#if 0
3182# define IEMOP_BITCH_ABOUT_STUB() \
3183 do { \
3184 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
3185 iemOpStubMsg2(pIemCpu); \
3186 RTAssertPanic(); \
3187 } while (0)
3188#else
3189# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
3190#endif
3191
3192/** Stubs an opcode. */
3193#define FNIEMOP_STUB(a_Name) \
3194 FNIEMOP_DEF(a_Name) \
3195 { \
3196 IEMOP_BITCH_ABOUT_STUB(); \
3197 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3198 } \
3199 typedef int ignore_semicolon
3200
3201/** Stubs an opcode. */
3202#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
3203 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3204 { \
3205 IEMOP_BITCH_ABOUT_STUB(); \
3206 NOREF(a_Name0); \
3207 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3208 } \
3209 typedef int ignore_semicolon
3210
3211/** Stubs an opcode which currently should raise \#UD. */
3212#define FNIEMOP_UD_STUB(a_Name) \
3213 FNIEMOP_DEF(a_Name) \
3214 { \
3215 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3216 return IEMOP_RAISE_INVALID_OPCODE(); \
3217 } \
3218 typedef int ignore_semicolon
3219
3220/** Stubs an opcode which currently should raise \#UD. */
3221#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
3222 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3223 { \
3224 NOREF(a_Name0); \
3225 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3226 return IEMOP_RAISE_INVALID_OPCODE(); \
3227 } \
3228 typedef int ignore_semicolon
3229
3230
3231
3232/** @name Register Access.
3233 * @{
3234 */
3235
3236/**
3237 * Gets a reference (pointer) to the specified hidden segment register.
3238 *
3239 * @returns Hidden register reference.
3240 * @param pIemCpu The per CPU data.
3241 * @param iSegReg The segment register.
3242 */
3243static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
3244{
3245 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3246 PCPUMSELREG pSReg;
3247 switch (iSegReg)
3248 {
3249 case X86_SREG_ES: pSReg = &pCtx->es; break;
3250 case X86_SREG_CS: pSReg = &pCtx->cs; break;
3251 case X86_SREG_SS: pSReg = &pCtx->ss; break;
3252 case X86_SREG_DS: pSReg = &pCtx->ds; break;
3253 case X86_SREG_FS: pSReg = &pCtx->fs; break;
3254 case X86_SREG_GS: pSReg = &pCtx->gs; break;
3255 default:
3256 AssertFailedReturn(NULL);
3257 }
3258#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3259 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
3260 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
3261#else
3262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
3263#endif
3264 return pSReg;
3265}
3266
3267
3268/**
3269 * Gets a reference (pointer) to the specified segment register (the selector
3270 * value).
3271 *
3272 * @returns Pointer to the selector variable.
3273 * @param pIemCpu The per CPU data.
3274 * @param iSegReg The segment register.
3275 */
3276static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
3277{
3278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3279 switch (iSegReg)
3280 {
3281 case X86_SREG_ES: return &pCtx->es.Sel;
3282 case X86_SREG_CS: return &pCtx->cs.Sel;
3283 case X86_SREG_SS: return &pCtx->ss.Sel;
3284 case X86_SREG_DS: return &pCtx->ds.Sel;
3285 case X86_SREG_FS: return &pCtx->fs.Sel;
3286 case X86_SREG_GS: return &pCtx->gs.Sel;
3287 }
3288 AssertFailedReturn(NULL);
3289}
3290
3291
3292/**
3293 * Fetches the selector value of a segment register.
3294 *
3295 * @returns The selector value.
3296 * @param pIemCpu The per CPU data.
3297 * @param iSegReg The segment register.
3298 */
3299static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3300{
3301 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3302 switch (iSegReg)
3303 {
3304 case X86_SREG_ES: return pCtx->es.Sel;
3305 case X86_SREG_CS: return pCtx->cs.Sel;
3306 case X86_SREG_SS: return pCtx->ss.Sel;
3307 case X86_SREG_DS: return pCtx->ds.Sel;
3308 case X86_SREG_FS: return pCtx->fs.Sel;
3309 case X86_SREG_GS: return pCtx->gs.Sel;
3310 }
3311 AssertFailedReturn(0xffff);
3312}
3313
3314
3315/**
3316 * Gets a reference (pointer) to the specified general register.
3317 *
3318 * @returns Register reference.
3319 * @param pIemCpu The per CPU data.
3320 * @param iReg The general register.
3321 */
3322static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3323{
3324 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3325 switch (iReg)
3326 {
3327 case X86_GREG_xAX: return &pCtx->rax;
3328 case X86_GREG_xCX: return &pCtx->rcx;
3329 case X86_GREG_xDX: return &pCtx->rdx;
3330 case X86_GREG_xBX: return &pCtx->rbx;
3331 case X86_GREG_xSP: return &pCtx->rsp;
3332 case X86_GREG_xBP: return &pCtx->rbp;
3333 case X86_GREG_xSI: return &pCtx->rsi;
3334 case X86_GREG_xDI: return &pCtx->rdi;
3335 case X86_GREG_x8: return &pCtx->r8;
3336 case X86_GREG_x9: return &pCtx->r9;
3337 case X86_GREG_x10: return &pCtx->r10;
3338 case X86_GREG_x11: return &pCtx->r11;
3339 case X86_GREG_x12: return &pCtx->r12;
3340 case X86_GREG_x13: return &pCtx->r13;
3341 case X86_GREG_x14: return &pCtx->r14;
3342 case X86_GREG_x15: return &pCtx->r15;
3343 }
3344 AssertFailedReturn(NULL);
3345}
3346
3347
3348/**
3349 * Gets a reference (pointer) to the specified 8-bit general register.
3350 *
3351 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3352 *
3353 * @returns Register reference.
3354 * @param pIemCpu The per CPU data.
3355 * @param iReg The register.
3356 */
3357static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3358{
3359 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3360 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3361
3362 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3363 if (iReg >= 4)
3364 pu8Reg++;
3365 return pu8Reg;
3366}
3367
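/* Illustrative sketch of the 8-bit register encoding handled above, assuming a
 * little-endian host: without a REX prefix, registers 4 thru 7 map onto the high
 * bytes of the first four GPRs, which is why the code masks with 3 and bumps the
 * byte pointer.
 *
 * @code
 *      // No REX prefix, iReg = 4 selects AH:
 *      uint8_t *pbAh = iemGRegRefU8(pIemCpu, 4);   // == (uint8_t *)&pCtx->rax + 1
 *      // With a REX prefix present, iReg = 4 selects SPL (the low byte of RSP).
 * @endcode
 */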
3368
3369/**
3370 * Fetches the value of an 8-bit general register.
3371 *
3372 * @returns The register value.
3373 * @param pIemCpu The per CPU data.
3374 * @param iReg The register.
3375 */
3376static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3377{
3378 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3379 return *pbSrc;
3380}
3381
3382
3383/**
3384 * Fetches the value of a 16-bit general register.
3385 *
3386 * @returns The register value.
3387 * @param pIemCpu The per CPU data.
3388 * @param iReg The register.
3389 */
3390static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3391{
3392 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3393}
3394
3395
3396/**
3397 * Fetches the value of a 32-bit general register.
3398 *
3399 * @returns The register value.
3400 * @param pIemCpu The per CPU data.
3401 * @param iReg The register.
3402 */
3403static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3404{
3405 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3406}
3407
3408
3409/**
3410 * Fetches the value of a 64-bit general register.
3411 *
3412 * @returns The register value.
3413 * @param pIemCpu The per CPU data.
3414 * @param iReg The register.
3415 */
3416static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3417{
3418 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3419}
3420
3421
3422/**
3423 * Is the FPU state in FXSAVE format or not.
3424 *
3425 * @returns true if it is, false if it's in FNSAVE format.
3426 * @param pIemCpu The IEM per CPU data.
3427 */
3428DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3429{
3430#ifdef RT_ARCH_AMD64
3431 NOREF(pIemCpu);
3432 return true;
3433#else
3434 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3435 return true;
3436#endif
3437}
3438
3439
3440/**
3441 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3442 *
3443 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3444 * segment limit.
3445 *
3446 * @param pIemCpu The per CPU data.
3447 * @param offNextInstr The offset of the next instruction.
3448 */
3449static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3450{
3451 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3452 switch (pIemCpu->enmEffOpSize)
3453 {
3454 case IEMMODE_16BIT:
3455 {
3456 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3457 if ( uNewIp > pCtx->cs.u32Limit
3458 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3459 return iemRaiseGeneralProtectionFault0(pIemCpu);
3460 pCtx->rip = uNewIp;
3461 break;
3462 }
3463
3464 case IEMMODE_32BIT:
3465 {
3466 Assert(pCtx->rip <= UINT32_MAX);
3467 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3468
3469 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3470 if (uNewEip > pCtx->cs.u32Limit)
3471 return iemRaiseGeneralProtectionFault0(pIemCpu);
3472 pCtx->rip = uNewEip;
3473 break;
3474 }
3475
3476 case IEMMODE_64BIT:
3477 {
3478 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3479
3480 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3481 if (!IEM_IS_CANONICAL(uNewRip))
3482 return iemRaiseGeneralProtectionFault0(pIemCpu);
3483 pCtx->rip = uNewRip;
3484 break;
3485 }
3486
3487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3488 }
3489
3490 return VINF_SUCCESS;
3491}
3492
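/* Illustrative sketch of the relative jump math above: the displacement is added
 * on top of the instruction length (pIemCpu->offOpcode, the bytes decoded so far)
 * because relative branches are taken from the end of the instruction.
 *
 * @code
 *      // 16-bit code, "jmp short $" encoded as EB FE at ip=0x0100:
 *      //   offOpcode = 2, offNextInstr = -2
 *      //   uNewIp = 0x0100 + (-2) + 2 = 0x0100   (a tight self-loop)
 * @endcode
 */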
3493
3494/**
3495 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3496 *
3497 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3498 * segment limit.
3499 *
3500 * @returns Strict VBox status code.
3501 * @param pIemCpu The per CPU data.
3502 * @param offNextInstr The offset of the next instruction.
3503 */
3504static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3505{
3506 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3507 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3508
3509 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3510 if ( uNewIp > pCtx->cs.u32Limit
3511 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3512 return iemRaiseGeneralProtectionFault0(pIemCpu);
3513 /** @todo Test 16-bit jump in 64-bit mode. */
3514 pCtx->rip = uNewIp;
3515
3516 return VINF_SUCCESS;
3517}
3518
3519
3520/**
3521 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3522 *
3523 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3524 * segment limit.
3525 *
3526 * @returns Strict VBox status code.
3527 * @param pIemCpu The per CPU data.
3528 * @param offNextInstr The offset of the next instruction.
3529 */
3530static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3531{
3532 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3533 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3534
3535 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3536 {
3537 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3538
3539 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3540 if (uNewEip > pCtx->cs.u32Limit)
3541 return iemRaiseGeneralProtectionFault0(pIemCpu);
3542 pCtx->rip = uNewEip;
3543 }
3544 else
3545 {
3546 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3547
3548 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3549 if (!IEM_IS_CANONICAL(uNewRip))
3550 return iemRaiseGeneralProtectionFault0(pIemCpu);
3551 pCtx->rip = uNewRip;
3552 }
3553 return VINF_SUCCESS;
3554}
3555
3556
3557/**
3558 * Performs a near jump to the specified address.
3559 *
3560 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3561 * segment limit.
3562 *
3563 * @param pIemCpu The per CPU data.
3564 * @param uNewRip The new RIP value.
3565 */
3566static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3567{
3568 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3569 switch (pIemCpu->enmEffOpSize)
3570 {
3571 case IEMMODE_16BIT:
3572 {
3573 Assert(uNewRip <= UINT16_MAX);
3574 if ( uNewRip > pCtx->cs.u32Limit
3575 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3576 return iemRaiseGeneralProtectionFault0(pIemCpu);
3577 /** @todo Test 16-bit jump in 64-bit mode. */
3578 pCtx->rip = uNewRip;
3579 break;
3580 }
3581
3582 case IEMMODE_32BIT:
3583 {
3584 Assert(uNewRip <= UINT32_MAX);
3585 Assert(pCtx->rip <= UINT32_MAX);
3586 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3587
3588 if (uNewRip > pCtx->cs.u32Limit)
3589 return iemRaiseGeneralProtectionFault0(pIemCpu);
3590 pCtx->rip = uNewRip;
3591 break;
3592 }
3593
3594 case IEMMODE_64BIT:
3595 {
3596 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3597
3598 if (!IEM_IS_CANONICAL(uNewRip))
3599 return iemRaiseGeneralProtectionFault0(pIemCpu);
3600 pCtx->rip = uNewRip;
3601 break;
3602 }
3603
3604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3605 }
3606
3607 return VINF_SUCCESS;
3608}
3609
3610
3611/**
3612 * Get the address of the top of the stack.
3613 *
3614 * @param pIemCpu The per CPU data.
3615 * @param pCtx The CPU context which SP/ESP/RSP should be
3616 * read.
3617 */
3618DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
3619{
3620 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3621 return pCtx->rsp;
3622 if (pCtx->ss.Attr.n.u1DefBig)
3623 return pCtx->esp;
3624 return pCtx->sp;
3625}
3626
3627
3628/**
3629 * Updates the RIP/EIP/IP to point to the next instruction.
3630 *
3631 * @param pIemCpu The per CPU data.
3632 * @param cbInstr The number of bytes to add.
3633 */
3634static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3635{
3636 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3637 switch (pIemCpu->enmCpuMode)
3638 {
3639 case IEMMODE_16BIT:
3640 Assert(pCtx->rip <= UINT16_MAX);
3641 pCtx->eip += cbInstr;
3642 pCtx->eip &= UINT32_C(0xffff);
3643 break;
3644
3645 case IEMMODE_32BIT:
3646 pCtx->eip += cbInstr;
3647 Assert(pCtx->rip <= UINT32_MAX);
3648 break;
3649
3650 case IEMMODE_64BIT:
3651 pCtx->rip += cbInstr;
3652 break;
3653 default: AssertFailed();
3654 }
3655}
3656
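/* Illustrative sketch of the 16-bit wrap-around handled above:
 *
 * @code
 *      // 16-bit mode, ip = 0xFFFE, cbInstr = 3:
 *      //   eip = 0x0000FFFE + 3 = 0x00010001, masked back to 0x0001.
 * @endcode
 */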
3657
3658/**
3659 * Updates the RIP/EIP/IP to point to the next instruction.
3660 *
3661 * @param pIemCpu The per CPU data.
3662 */
3663static void iemRegUpdateRip(PIEMCPU pIemCpu)
3664{
3665 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3666}
3667
3668
3669/**
3670 * Adds to the stack pointer.
3671 *
3672 * @param pIemCpu The per CPU data.
3673 * @param pCtx The CPU context which SP/ESP/RSP should be
3674 * updated.
3675 * @param cbToAdd The number of bytes to add.
3676 */
3677DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
3678{
3679 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3680 pCtx->rsp += cbToAdd;
3681 else if (pCtx->ss.Attr.n.u1DefBig)
3682 pCtx->esp += cbToAdd;
3683 else
3684 pCtx->sp += cbToAdd;
3685}
3686
3687
3688/**
3689 * Subtracts from the stack pointer.
3690 *
3691 * @param pIemCpu The per CPU data.
3692 * @param pCtx The CPU context which SP/ESP/RSP should be
3693 * updated.
3694 * @param cbToSub The number of bytes to subtract.
3695 */
3696DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
3697{
3698 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3699 pCtx->rsp -= cbToSub;
3700 else if (pCtx->ss.Attr.n.u1DefBig)
3701 pCtx->esp -= cbToSub;
3702 else
3703 pCtx->sp -= cbToSub;
3704}
3705
3706
3707/**
3708 * Adds to the temporary stack pointer.
3709 *
3710 * @param pIemCpu The per CPU data.
3711 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3712 * @param cbToAdd The number of bytes to add.
3713 * @param pCtx Where to get the current stack mode.
3714 */
3715DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
3716{
3717 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3718 pTmpRsp->u += cbToAdd;
3719 else if (pCtx->ss.Attr.n.u1DefBig)
3720 pTmpRsp->DWords.dw0 += cbToAdd;
3721 else
3722 pTmpRsp->Words.w0 += cbToAdd;
3723}
3724
3725
3726/**
3727 * Subtracts from the temporary stack pointer.
3728 *
3729 * @param pIemCpu The per CPU data.
3730 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3731 * @param cbToSub The number of bytes to subtract.
3732 * @param pCtx Where to get the current stack mode.
3733 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3734 * expecting that.
3735 */
3736DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
3737{
3738 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3739 pTmpRsp->u -= cbToSub;
3740 else if (pCtx->ss.Attr.n.u1DefBig)
3741 pTmpRsp->DWords.dw0 -= cbToSub;
3742 else
3743 pTmpRsp->Words.w0 -= cbToSub;
3744}
3745
3746
3747/**
3748 * Calculates the effective stack address for a push of the specified size as
3749 * well as the new RSP value (upper bits may be masked).
3750 *
3751 * @returns Effective stack address for the push.
3752 * @param pIemCpu The IEM per CPU data.
3753 * @param pCtx Where to get the current stack mode.
3754 * @param cbItem The size of the stack item to push.
3755 * @param puNewRsp Where to return the new RSP value.
3756 */
3757DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3758{
3759 RTUINT64U uTmpRsp;
3760 RTGCPTR GCPtrTop;
3761 uTmpRsp.u = pCtx->rsp;
3762
3763 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3764 GCPtrTop = uTmpRsp.u -= cbItem;
3765 else if (pCtx->ss.Attr.n.u1DefBig)
3766 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3767 else
3768 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3769 *puNewRsp = uTmpRsp.u;
3770 return GCPtrTop;
3771}
3772
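/* Illustrative sketch of the push address calculation above, assuming a 16-bit
 * stack (SS.D clear) outside long mode:
 *
 * @code
 *      // rsp = 0x00001000, cbItem = 2:
 *      //   GCPtrTop  = 0x0FFE      (only the low word is decremented)
 *      //   *puNewRsp = 0x00000FFE  (the upper bits of RSP are left untouched)
 * @endcode
 */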
3773
3774/**
3775 * Gets the current stack pointer and calculates the value after a pop of the
3776 * specified size.
3777 *
3778 * @returns Current stack pointer.
3779 * @param pIemCpu The per CPU data.
3780 * @param pCtx Where to get the current stack mode.
3781 * @param cbItem The size of the stack item to pop.
3782 * @param puNewRsp Where to return the new RSP value.
3783 */
3784DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3785{
3786 RTUINT64U uTmpRsp;
3787 RTGCPTR GCPtrTop;
3788 uTmpRsp.u = pCtx->rsp;
3789
3790 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3791 {
3792 GCPtrTop = uTmpRsp.u;
3793 uTmpRsp.u += cbItem;
3794 }
3795 else if (pCtx->ss.Attr.n.u1DefBig)
3796 {
3797 GCPtrTop = uTmpRsp.DWords.dw0;
3798 uTmpRsp.DWords.dw0 += cbItem;
3799 }
3800 else
3801 {
3802 GCPtrTop = uTmpRsp.Words.w0;
3803 uTmpRsp.Words.w0 += cbItem;
3804 }
3805 *puNewRsp = uTmpRsp.u;
3806 return GCPtrTop;
3807}
3808
3809
3810/**
3811 * Calculates the effective stack address for a push of the specified size as
3812 * well as the new temporary RSP value (upper bits may be masked).
3813 *
3814 * @returns Effective stack address for the push.
3815 * @param pIemCpu The per CPU data.
3816 * @param pCtx Where to get the current stack mode.
3817 * @param pTmpRsp The temporary stack pointer. This is updated.
3818 * @param cbItem The size of the stack item to push.
3819 */
3820DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
3821{
3822 RTGCPTR GCPtrTop;
3823
3824 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3825 GCPtrTop = pTmpRsp->u -= cbItem;
3826 else if (pCtx->ss.Attr.n.u1DefBig)
3827 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3828 else
3829 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3830 return GCPtrTop;
3831}
3832
3833
3834/**
3835 * Gets the effective stack address for a pop of the specified size and
3836 * calculates and updates the temporary RSP.
3837 *
3838 * @returns Current stack pointer.
3839 * @param pIemCpu The per CPU data.
3840 * @param pTmpRsp The temporary stack pointer. This is updated.
3841 * @param pCtx Where to get the current stack mode.
3842 * @param cbItem The size of the stack item to pop.
3843 */
3844DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
3845{
3846 RTGCPTR GCPtrTop;
3847 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3848 {
3849 GCPtrTop = pTmpRsp->u;
3850 pTmpRsp->u += cbItem;
3851 }
3852 else if (pCtx->ss.Attr.n.u1DefBig)
3853 {
3854 GCPtrTop = pTmpRsp->DWords.dw0;
3855 pTmpRsp->DWords.dw0 += cbItem;
3856 }
3857 else
3858 {
3859 GCPtrTop = pTmpRsp->Words.w0;
3860 pTmpRsp->Words.w0 += cbItem;
3861 }
3862 return GCPtrTop;
3863}
3864
3865
3866/**
3867 * Checks if an Intel CPUID feature bit is set.
3868 *
3869 * @returns true / false.
3870 *
3871 * @param pIemCpu The IEM per CPU data.
3872 * @param fEdx The EDX bit to test, or 0 if only testing ECX.
3873 * @param fEcx The ECX bit to test, or 0 if only testing EDX.
3874 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3875 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3876 */
3877static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3878{
3879 uint32_t uEax, uEbx, uEcx, uEdx;
3880 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3881 return (fEcx && (uEcx & fEcx))
3882 || (fEdx && (uEdx & fEdx));
3883}
3884
3885
3886/**
3887 * Checks if an AMD CPUID feature bit is set.
3888 *
3889 * @returns true / false.
3890 *
3891 * @param pIemCpu The IEM per CPU data.
3892 * @param fEdx The EDX bit to test, or 0 if only testing ECX.
3893 * @param fEcx The ECX bit to test, or 0 if only testing EDX.
3894 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3895 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3896 */
3897static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3898{
3899 uint32_t uEax, uEbx, uEcx, uEdx;
3900 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3901 return (fEcx && (uEcx & fEcx))
3902 || (fEdx && (uEdx & fEdx));
3903}
3904
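/* Illustrative sketch: callers normally go through the
 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_* / IEM_IS_AMD_CPUID_FEATURE_PRESENT_*
 * wrappers mentioned in the remarks above; a direct call looks like this
 * (the feature bit constant is assumed to come from the x86 headers):
 *
 * @code
 *      if (iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_FXSR, 0))
 *      {
 *          // The guest reports FXSAVE/FXRSTOR support in CPUID.01h:EDX.
 *      }
 * @endcode
 */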
3905/** @} */
3906
3907
3908/** @name FPU access and helpers.
3909 *
3910 * @{
3911 */
3912
3913
3914/**
3915 * Hook for preparing to use the host FPU.
3916 *
3917 * This is necessary in ring-0 and raw-mode context.
3918 *
3919 * @param pIemCpu The IEM per CPU data.
3920 */
3921DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3922{
3923#ifdef IN_RING3
3924 NOREF(pIemCpu);
3925#else
3926/** @todo RZ: FIXME */
3927//# error "Implement me"
3928#endif
3929}
3930
3931
3932/**
3933 * Hook for preparing to use the host FPU for SSE instructions.
3934 *
3935 * This is necessary in ring-0 and raw-mode context.
3936 *
3937 * @param pIemCpu The IEM per CPU data.
3938 */
3939DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
3940{
3941 iemFpuPrepareUsage(pIemCpu);
3942}
3943
3944
3945/**
3946 * Stores a QNaN value into a FPU register.
3947 *
3948 * @param pReg Pointer to the register.
3949 */
3950DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3951{
3952 pReg->au32[0] = UINT32_C(0x00000000);
3953 pReg->au32[1] = UINT32_C(0xc0000000);
3954 pReg->au16[4] = UINT16_C(0xffff);
3955}
3956
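/* Illustrative note on the value stored above: the three stores produce the
 * 80-bit "real indefinite" QNaN - sign bit set, exponent 0x7fff, mantissa
 * 0xc000000000000000 - which is what the FPU itself substitutes on masked
 * invalid-operation exceptions.
 *
 * @code
 *      RTFLOAT80U QNan;
 *      iemFpuStoreQNan(&QNan);
 *      // QNan.s.uExponent   == 0x7fff (with the sign bit set)
 *      // QNan.s.u64Mantissa == UINT64_C(0xc000000000000000)
 * @endcode
 */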
3957
3958/**
3959 * Updates the FOP, FPU.CS and FPUIP registers.
3960 *
3961 * @param pIemCpu The IEM per CPU data.
3962 * @param pCtx The CPU context.
3963 */
3964DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3965{
3966 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3967 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3968 /** @todo FPU.CS and FPUIP need to be kept separately. */
3969 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3970 {
3971 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
3972 * happens in real mode here based on the fnsave and fnstenv images. */
3973 pCtx->fpu.CS = 0;
3974 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3975 }
3976 else
3977 {
3978 pCtx->fpu.CS = pCtx->cs.Sel;
3979 pCtx->fpu.FPUIP = pCtx->rip;
3980 }
3981}
3982
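/* Illustrative sketch of the FOP encoding above: FOP is the traditional 11-bit
 * x87 opcode, i.e. the low three bits of the D8..DF escape byte followed by the
 * ModR/M byte.
 *
 * @code
 *      // fsqrt is encoded as D9 FA:
 *      //   FOP = 0xFA | ((0xD9 & 7) << 8) = 0x1FA
 * @endcode
 */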
3983
3984/**
3985 * Updates the FPU.DS and FPUDP registers.
3986 *
3987 * @param pIemCpu The IEM per CPU data.
3988 * @param pCtx The CPU context.
3989 * @param iEffSeg The effective segment register.
3990 * @param GCPtrEff The effective address relative to @a iEffSeg.
3991 */
3992DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3993{
3994 RTSEL sel;
3995 switch (iEffSeg)
3996 {
3997 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3998 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3999 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
4000 case X86_SREG_ES: sel = pCtx->es.Sel; break;
4001 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
4002 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
4003 default:
4004 AssertMsgFailed(("%d\n", iEffSeg));
4005 sel = pCtx->ds.Sel;
4006 }
4007 /** @todo FPU.DS and FPUDP need to be kept separately. */
4008 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4009 {
4010 pCtx->fpu.DS = 0;
4011 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
4012 }
4013 else
4014 {
4015 pCtx->fpu.DS = sel;
4016 pCtx->fpu.FPUDP = GCPtrEff;
4017 }
4018}
4019
4020
4021/**
4022 * Rotates the stack registers in the push direction.
4023 *
4024 * @param pCtx The CPU context.
4025 * @remarks This is a complete waste of time, but fxsave stores the registers in
4026 * stack order.
4027 */
4028DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
4029{
4030 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
4031 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
4032 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
4033 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
4034 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
4035 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
4036 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
4037 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
4038 pCtx->fpu.aRegs[0].r80 = r80Tmp;
4039}
4040
4041
4042/**
4043 * Rotates the stack registers in the pop direction.
4044 *
4045 * @param pCtx The CPU context.
4046 * @remarks This is a complete waste of time, but fxsave stores the registers in
4047 * stack order.
4048 */
4049DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
4050{
4051 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
4052 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
4053 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
4054 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
4055 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
4056 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
4057 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
4058 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
4059 pCtx->fpu.aRegs[7].r80 = r80Tmp;
4060}
4061
4062
4063/**
4064 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4065 * exception prevents it.
4066 *
4067 * @param pIemCpu The IEM per CPU data.
4068 * @param pResult The FPU operation result to push.
4069 * @param pCtx The CPU context.
4070 */
4071static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
4072{
4073 /* Update FSW and bail if there are pending exceptions afterwards. */
4074 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4075 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4076 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4077 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4078 {
4079 pCtx->fpu.FSW = fFsw;
4080 return;
4081 }
4082
4083 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4084 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4085 {
4086 /* All is fine, push the actual value. */
4087 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4088 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
4089 }
4090 else if (pCtx->fpu.FCW & X86_FCW_IM)
4091 {
4092 /* Masked stack overflow, push QNaN. */
4093 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4094 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4095 }
4096 else
4097 {
4098 /* Raise stack overflow, don't push anything. */
4099 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4100 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4101 return;
4102 }
4103
4104 fFsw &= ~X86_FSW_TOP_MASK;
4105 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4106 pCtx->fpu.FSW = fFsw;
4107
4108 iemFpuRotateStackPush(pCtx);
4109}
4110
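/* Illustrative sketch of the TOP arithmetic used above: pushing decrements TOP
 * modulo 8, expressed as (TOP + 7) & X86_FSW_TOP_SMASK.
 *
 * @code
 *      // TOP = 0 before the push -> iNewTop = (0 + 7) & 7 = 7
 *      // TOP = 3 before the push -> iNewTop = (3 + 7) & 7 = 2
 * @endcode
 */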
4111
4112/**
4113 * Stores a result in a FPU register and updates the FSW and FTW.
4114 *
4115 * @param pIemCpu The IEM per CPU data.
4116 * @param pResult The result to store.
4117 * @param iStReg Which FPU register to store it in.
4118 * @param pCtx The CPU context.
4119 */
4120static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
4121{
4122 Assert(iStReg < 8);
4123 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4124 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4125 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4126 pCtx->fpu.FTW |= RT_BIT(iReg);
4127 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
4128}
4129
4130
4131/**
4132 * Only updates the FPU status word (FSW) with the result of the current
4133 * instruction.
4134 *
4135 * @param pCtx The CPU context.
4136 * @param u16FSW The FSW output of the current instruction.
4137 */
4138static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
4139{
4140 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4141 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4142}
4143
4144
4145/**
4146 * Pops one item off the FPU stack if no pending exception prevents it.
4147 *
4148 * @param pCtx The CPU context.
4149 */
4150static void iemFpuMaybePopOne(PCPUMCTX pCtx)
4151{
4152 /* Check pending exceptions. */
4153 uint16_t uFSW = pCtx->fpu.FSW;
4154 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4155 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4156 return;
4157
4158 /* TOP--. */
4159 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4160 uFSW &= ~X86_FSW_TOP_MASK;
4161 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4162 pCtx->fpu.FSW = uFSW;
4163
4164 /* Mark the previous ST0 as empty. */
4165 iOldTop >>= X86_FSW_TOP_SHIFT;
4166 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
4167
4168 /* Rotate the registers. */
4169 iemFpuRotateStackPop(pCtx);
4170}
4171
4172
4173/**
4174 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4175 *
4176 * @param pIemCpu The IEM per CPU data.
4177 * @param pResult The FPU operation result to push.
4178 */
4179static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
4180{
4181 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4182 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4183 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4184}
4185
4186
4187/**
4188 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4189 * and sets FPUDP and FPUDS.
4190 *
4191 * @param pIemCpu The IEM per CPU data.
4192 * @param pResult The FPU operation result to push.
4193 * @param iEffSeg The effective segment register.
4194 * @param GCPtrEff The effective address relative to @a iEffSeg.
4195 */
4196static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4197{
4198 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4199 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4200 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4201 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4202}
4203
4204
4205/**
4206 * Replace ST0 with the first value and push the second onto the FPU stack,
4207 * unless a pending exception prevents it.
4208 *
4209 * @param pIemCpu The IEM per CPU data.
4210 * @param pResult The FPU operation result to store and push.
4211 */
4212static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
4213{
4214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4215 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4216
4217 /* Update FSW and bail if there are pending exceptions afterwards. */
4218 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4219 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4220 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4221 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4222 {
4223 pCtx->fpu.FSW = fFsw;
4224 return;
4225 }
4226
4227 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4228 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4229 {
4230 /* All is fine, push the actual value. */
4231 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4232 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
4233 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
4234 }
4235 else if (pCtx->fpu.FCW & X86_FCW_IM)
4236 {
4237 /* Masked stack overflow, push QNaN. */
4238 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4239 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4240 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4241 }
4242 else
4243 {
4244 /* Raise stack overflow, don't push anything. */
4245 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4246 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4247 return;
4248 }
4249
4250 fFsw &= ~X86_FSW_TOP_MASK;
4251 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4252 pCtx->fpu.FSW = fFsw;
4253
4254 iemFpuRotateStackPush(pCtx);
4255}
4256
4257
4258/**
4259 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4260 * FOP.
4261 *
4262 * @param pIemCpu The IEM per CPU data.
4263 * @param pResult The result to store.
4264 * @param iStReg Which FPU register to store it in.
4266 */
4267static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4268{
4269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4270 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4271 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4272}
4273
4274
4275/**
4276 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4277 * FOP, and then pops the stack.
4278 *
4279 * @param pIemCpu The IEM per CPU data.
4280 * @param pResult The result to store.
4281 * @param iStReg Which FPU register to store it in.
4283 */
4284static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4285{
4286 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4287 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4288 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4289 iemFpuMaybePopOne(pCtx);
4290}
4291
4292
4293/**
4294 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4295 * FPUDP, and FPUDS.
4296 *
4297 * @param pIemCpu The IEM per CPU data.
4298 * @param pResult The result to store.
4299 * @param iStReg Which FPU register to store it in.
4301 * @param iEffSeg The effective memory operand selector register.
4302 * @param GCPtrEff The effective memory operand offset.
4303 */
4304static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4305{
4306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4307 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
4308 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4309 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4310}
4311
4312
4313/**
4314 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4315 * FPUDP, and FPUDS, and then pops the stack.
4316 *
4317 * @param pIemCpu The IEM per CPU data.
4318 * @param pResult The result to store.
4319 * @param iStReg Which FPU register to store it in.
4321 * @param iEffSeg The effective memory operand selector register.
4322 * @param GCPtrEff The effective memory operand offset.
4323 */
4324static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4325 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4326{
4327 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4328 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4329 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4330 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4331 iemFpuMaybePopOne(pCtx);
4332}
4333
4334
4335/**
4336 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4337 *
4338 * @param pIemCpu The IEM per CPU data.
4339 */
4340static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4341{
4342 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4343}
4344
4345
4346/**
4347 * Marks the specified stack register as free (for FFREE).
4348 *
4349 * @param pIemCpu The IEM per CPU data.
4350 * @param iStReg The register to free.
4351 */
4352static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4353{
4354 Assert(iStReg < 8);
4355 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4356 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4357 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4358}
4359
4360
4361/**
4362 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4363 *
4364 * @param pIemCpu The IEM per CPU data.
4365 */
4366static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4367{
4368 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4369 uint16_t uFsw = pCtx->fpu.FSW;
4370 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4371 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4372 uFsw &= ~X86_FSW_TOP_MASK;
4373 uFsw |= uTop;
4374 pCtx->fpu.FSW = uFsw;
4375}
4376
4377
4378/**
4379 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4380 *
4381 * @param pIemCpu The IEM per CPU data.
4382 */
4383static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4384{
4385 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4386 uint16_t uFsw = pCtx->fpu.FSW;
4387 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4388 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4389 uFsw &= ~X86_FSW_TOP_MASK;
4390 uFsw |= uTop;
4391 pCtx->fpu.FSW = uFsw;
4392}
4393
4394
4395/**
4396 * Updates the FSW, FOP, FPUIP, and FPUCS.
4397 *
4398 * @param pIemCpu The IEM per CPU data.
4399 * @param u16FSW The FSW from the current instruction.
4400 */
4401static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4402{
4403 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4404 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4405 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4406}
4407
4408
4409/**
4410 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4411 *
4412 * @param pIemCpu The IEM per CPU data.
4413 * @param u16FSW The FSW from the current instruction.
4414 */
4415static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4416{
4417 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4418 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4419 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4420 iemFpuMaybePopOne(pCtx);
4421}
4422
4423
4424/**
4425 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4426 *
4427 * @param pIemCpu The IEM per CPU data.
4428 * @param u16FSW The FSW from the current instruction.
4429 * @param iEffSeg The effective memory operand selector register.
4430 * @param GCPtrEff The effective memory operand offset.
4431 */
4432static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4433{
4434 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4435 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4436 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4437 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4438}
4439
4440
4441/**
4442 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4443 *
4444 * @param pIemCpu The IEM per CPU data.
4445 * @param u16FSW The FSW from the current instruction.
4446 */
4447static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4448{
4449 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4450 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4451 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4452 iemFpuMaybePopOne(pCtx);
4453 iemFpuMaybePopOne(pCtx);
4454}
4455
4456
4457/**
4458 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4459 *
4460 * @param pIemCpu The IEM per CPU data.
4461 * @param u16FSW The FSW from the current instruction.
4462 * @param iEffSeg The effective memory operand selector register.
4463 * @param GCPtrEff The effective memory operand offset.
4464 */
4465static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4466{
4467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4468 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4469 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4470 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4471 iemFpuMaybePopOne(pCtx);
4472}
4473
4474
4475/**
4476 * Worker routine for raising an FPU stack underflow exception.
4477 *
4478 * @param pIemCpu The IEM per CPU data.
4479 * @param iStReg The stack register being accessed.
4480 * @param pCtx The CPU context.
4481 */
4482static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4483{
4484 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4485 if (pCtx->fpu.FCW & X86_FCW_IM)
4486 {
4487 /* Masked underflow. */
4488 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4489 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4490 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4491 if (iStReg != UINT8_MAX)
4492 {
4493 pCtx->fpu.FTW |= RT_BIT(iReg);
4494 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4495 }
4496 }
4497 else
4498 {
4499 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4500 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4501 }
4502}
4503
4504
4505/**
4506 * Raises a FPU stack underflow exception.
4507 *
4508 * @param pIemCpu The IEM per CPU data.
4509 * @param iStReg The destination register that should be loaded
4510 * with QNaN if \#IS is not masked. Specify
4511 * UINT8_MAX if none (like for fcom).
4512 */
4513DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4514{
4515 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4516 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4517 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4518}
4519
4520
4521DECL_NO_INLINE(static, void)
4522iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4523{
4524 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4525 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4526 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4527 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4528}
4529
4530
4531DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4532{
4533 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4534 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4535 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4536 iemFpuMaybePopOne(pCtx);
4537}
4538
4539
4540DECL_NO_INLINE(static, void)
4541iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4542{
4543 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4544 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4545 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4546 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4547 iemFpuMaybePopOne(pCtx);
4548}
4549
4550
4551DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4552{
4553 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4554 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4555 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4556 iemFpuMaybePopOne(pCtx);
4557 iemFpuMaybePopOne(pCtx);
4558}
4559
4560
4561DECL_NO_INLINE(static, void)
4562iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4563{
4564 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4565 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4566
4567 if (pCtx->fpu.FCW & X86_FCW_IM)
4568 {
4569 /* Masked underflow - Push QNaN. */
4570 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4571 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4572 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4573 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4574 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4575 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4576 iemFpuRotateStackPush(pCtx);
4577 }
4578 else
4579 {
4580 /* Exception pending - don't change TOP or the register stack. */
4581 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4582 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4583 }
4584}
4585
4586
4587DECL_NO_INLINE(static, void)
4588iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4589{
4590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4591 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4592
4593 if (pCtx->fpu.FCW & X86_FCW_IM)
4594 {
4595 /* Masked underflow - Push QNaN. */
4596 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4597 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4598 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4599 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4600 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4601 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4602 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4603 iemFpuRotateStackPush(pCtx);
4604 }
4605 else
4606 {
4607 /* Exception pending - don't change TOP or the register stack. */
4608 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4609 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4610 }
4611}
4612
4613
4614/**
4615 * Worker routine for raising an FPU stack overflow exception on a push.
4616 *
4617 * @param pIemCpu The IEM per CPU data.
4618 * @param pCtx The CPU context.
4619 */
4620static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4621{
4622 if (pCtx->fpu.FCW & X86_FCW_IM)
4623 {
4624 /* Masked overflow. */
4625 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4626 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4627 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4628 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4629 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4630 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4631 iemFpuRotateStackPush(pCtx);
4632 }
4633 else
4634 {
4635 /* Exception pending - don't change TOP or the register stack. */
4636 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4637 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4638 }
4639}
4640
4641
4642/**
4643 * Raises a FPU stack overflow exception on a push.
4644 *
4645 * @param pIemCpu The IEM per CPU data.
4646 */
4647DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4648{
4649 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4650 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4651 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4652}
4653
4654
4655/**
4656 * Raises a FPU stack overflow exception on a push with a memory operand.
4657 *
4658 * @param pIemCpu The IEM per CPU data.
4659 * @param iEffSeg The effective memory operand selector register.
4660 * @param GCPtrEff The effective memory operand offset.
4661 */
4662DECL_NO_INLINE(static, void)
4663iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4664{
4665 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4666 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4667 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4668 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4669}
4670
4671
4672static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4673{
4674 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4675 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4676 if (pCtx->fpu.FTW & RT_BIT(iReg))
4677 return VINF_SUCCESS;
4678 return VERR_NOT_FOUND;
4679}
4680
4681
4682static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4683{
4684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4685 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4686 if (pCtx->fpu.FTW & RT_BIT(iReg))
4687 {
4688 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4689 return VINF_SUCCESS;
4690 }
4691 return VERR_NOT_FOUND;
4692}
4693
4694
4695static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4696 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4697{
4698 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4699 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4700 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4701 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4702 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4703 {
4704 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4705 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4706 return VINF_SUCCESS;
4707 }
4708 return VERR_NOT_FOUND;
4709}
4710
4711
4712static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4713{
4714 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4715 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4716 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4717 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4718 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4719 {
4720 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4721 return VINF_SUCCESS;
4722 }
4723 return VERR_NOT_FOUND;
4724}
4725
4726
4727/**
4728 * Updates the FPU exception status after FCW is changed.
4729 *
4730 * @param pCtx The CPU context.
4731 */
4732static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4733{
4734 uint16_t u16Fsw = pCtx->fpu.FSW;
4735 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4736 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4737 else
4738 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4739 pCtx->fpu.FSW = u16Fsw;
4740}
4741
4742
4743/**
4744 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4745 *
4746 * @returns The full FTW.
4747 * @param pCtx The CPU state.
4748 */
4749static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4750{
4751 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4752 uint16_t u16Ftw = 0;
4753 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4754 for (unsigned iSt = 0; iSt < 8; iSt++)
4755 {
4756 unsigned const iReg = (iSt + iTop) & 7;
4757 if (!(u8Ftw & RT_BIT(iReg)))
4758 u16Ftw |= 3 << (iReg * 2); /* empty */
4759 else
4760 {
4761 uint16_t uTag;
4762 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4763 if (pr80Reg->s.uExponent == 0x7fff)
4764 uTag = 2; /* Exponent is all 1's => Special. */
4765 else if (pr80Reg->s.uExponent == 0x0000)
4766 {
4767 if (pr80Reg->s.u64Mantissa == 0x0000)
4768 uTag = 1; /* All bits are zero => Zero. */
4769 else
4770 uTag = 2; /* Must be special. */
4771 }
4772 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4773 uTag = 0; /* Valid. */
4774 else
4775 uTag = 2; /* Must be special. */
4776
4777 u16Ftw |= uTag << (iReg * 2);
4778 }
4779 }
4780
4781 return u16Ftw;
4782}
4783
4784
4785/**
4786 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4787 *
4788 * @returns The compressed FTW.
4789 * @param u16FullFtw The full FTW to convert.
4790 */
4791static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4792{
4793 uint8_t u8Ftw = 0;
4794 for (unsigned i = 0; i < 8; i++)
4795 {
4796 if ((u16FullFtw & 3) != 3 /*empty*/)
4797 u8Ftw |= RT_BIT(i);
4798 u16FullFtw >>= 2;
4799 }
4800
4801 return u8Ftw;
4802}
4803
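/* Illustrative sketch of the two tag word formats converted above: the full FTW
 * uses two bits per physical register (00=valid, 01=zero, 10=special, 11=empty),
 * while the compressed form kept in CPUMCTX only records used (1) vs empty (0).
 *
 * @code
 *      // Full FTW 0xFFFF: everything empty          -> iemFpuCompressFtw() == 0x00
 *      // Full FTW 0xFFFE: register 0 tagged special -> iemFpuCompressFtw() == 0x01
 * @endcode
 */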
4804/** @} */
4805
4806
4807/** @name Memory access.
4808 *
4809 * @{
4810 */
4811
4812
4813/**
4814 * Updates the IEMCPU::cbWritten counter if applicable.
4815 *
4816 * @param pIemCpu The IEM per CPU data.
4817 * @param fAccess The access being accounted for.
4818 * @param cbMem The access size.
4819 */
4820DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4821{
4822 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4823 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4824 pIemCpu->cbWritten += (uint32_t)cbMem;
4825}
4826
4827
4828/**
4829 * Checks if the given segment can be written to, raising the appropriate
4830 * exception if not.
4831 *
4832 * @returns VBox strict status code.
4833 *
4834 * @param pIemCpu The IEM per CPU data.
4835 * @param pHid Pointer to the hidden register.
4836 * @param iSegReg The register number.
4837 * @param pu64BaseAddr Where to return the base address to use for the
4838 * segment. (In 64-bit code it may differ from the
4839 * base in the hidden segment.)
4840 */
4841static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
4842{
4843 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4844 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4845 else
4846 {
4847 if (!pHid->Attr.n.u1Present)
4848 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4849
4850 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4851 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4852 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4853 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4854 *pu64BaseAddr = pHid->u64Base;
4855 }
4856 return VINF_SUCCESS;
4857}
4858
4859
4860/**
4861 * Checks if the given segment can be read from, raising the appropriate
4862 * exception if not.
4863 *
4864 * @returns VBox strict status code.
4865 *
4866 * @param pIemCpu The IEM per CPU data.
4867 * @param pHid Pointer to the hidden register.
4868 * @param iSegReg The register number.
4869 * @param pu64BaseAddr Where to return the base address to use for the
4870 * segment. (In 64-bit code it may differ from the
4871 * base in the hidden segment.)
4872 */
4873static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
4874{
4875 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4876 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4877 else
4878 {
4879 if (!pHid->Attr.n.u1Present)
4880 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4881
4882 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4883 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4884 *pu64BaseAddr = pHid->u64Base;
4885 }
4886 return VINF_SUCCESS;
4887}
4888
4889
4890/**
4891 * Applies the segment limit, base and attributes.
4892 *
4893 * This may raise a \#GP or \#SS.
4894 *
4895 * @returns VBox strict status code.
4896 *
4897 * @param pIemCpu The IEM per CPU data.
4898 * @param fAccess The kind of access which is being performed.
4899 * @param iSegReg The index of the segment register to apply.
4900 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4901 * TSS, ++).
4902 * @param pGCPtrMem Pointer to the guest memory address to apply
4903 * segmentation to. Input and output parameter.
4904 */
4905static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4906 size_t cbMem, PRTGCPTR pGCPtrMem)
4907{
4908 if (iSegReg == UINT8_MAX)
4909 return VINF_SUCCESS;
4910
4911 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4912 switch (pIemCpu->enmCpuMode)
4913 {
4914 case IEMMODE_16BIT:
4915 case IEMMODE_32BIT:
4916 {
4917 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4918 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4919
4920 Assert(pSel->Attr.n.u1Present);
4921 Assert(pSel->Attr.n.u1DescType);
4922 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4923 {
4924 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4925 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4926 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4927
4928 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4929 {
4930 /** @todo CPL check. */
4931 }
4932
4933 /*
4934 * There are two kinds of data selectors, normal and expand down.
4935 */
4936 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4937 {
4938 if ( GCPtrFirst32 > pSel->u32Limit
4939 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4940 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4941
4942 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4943 }
4944 else
4945 {
4946 /** @todo implement expand down segments. */
4947 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4948 }
4949 }
4950 else
4951 {
4952
4953 /*
4954 * Code selectors can usually be used to read through; writing is
4955 * only permitted in real and V8086 mode.
4956 */
4957 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4958 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4959 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4960 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4961 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4962
4963 if ( GCPtrFirst32 > pSel->u32Limit
4964 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4965 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4966
4967 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4968 {
4969 /** @todo CPL check. */
4970 }
4971
4972 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4973 }
4974 return VINF_SUCCESS;
4975 }
4976
4977 case IEMMODE_64BIT:
4978 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4979 *pGCPtrMem += pSel->u64Base;
4980 return VINF_SUCCESS;
4981
4982 default:
4983 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4984 }
4985}
4986
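/* Illustrative sketch of the 64-bit case above, where only FS and GS contribute
 * a base and no limit checking is done (the FS base value is an assumption):
 *
 * @code
 *      // 64-bit mode, guest FS base = 0x1000:
 *      RTGCPTR GCPtrMem = 0x20;
 *      iemMemApplySegment(pIemCpu, IEM_ACCESS_DATA_R, X86_SREG_FS, 4, &GCPtrMem);
 *      // GCPtrMem is now 0x1020; with ES/CS/SS/DS it would have stayed 0x20.
 * @endcode
 */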
4987
4988/**
4989 * Translates a virtual address to a physical address and checks if we
4990 * can access the page as specified.
4991 *
4992 * @param pIemCpu The IEM per CPU data.
4993 * @param GCPtrMem The virtual address.
4994 * @param fAccess The intended access.
4995 * @param pGCPhysMem Where to return the physical address.
4996 */
4997static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4998 PRTGCPHYS pGCPhysMem)
4999{
5000 /** @todo Need a different PGM interface here. We're currently using
5001 * generic / REM interfaces. This won't cut it for R0 & RC. */
5002 RTGCPHYS GCPhys;
5003 uint64_t fFlags;
5004 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
5005 if (RT_FAILURE(rc))
5006 {
5007 /** @todo Check unassigned memory in unpaged mode. */
5008 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5009 *pGCPhysMem = NIL_RTGCPHYS;
5010 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
5011 }
5012
5013 /* If the page is writable and does not have the no-exec bit set, all
5014 access is allowed. Otherwise we'll have to check more carefully... */
5015 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5016 {
5017 /* Write to read only memory? */
5018 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5019 && !(fFlags & X86_PTE_RW)
5020 && ( pIemCpu->uCpl != 0
5021 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
5022 {
5023 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5024 *pGCPhysMem = NIL_RTGCPHYS;
5025 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5026 }
5027
5028 /* Kernel memory accessed by userland? */
5029 if ( !(fFlags & X86_PTE_US)
5030 && pIemCpu->uCpl == 3
5031 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5032 {
5033 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5034 *pGCPhysMem = NIL_RTGCPHYS;
5035 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5036 }
5037
5038 /* Executing non-executable memory? */
5039 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5040 && (fFlags & X86_PTE_PAE_NX)
5041 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
5042 {
5043 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5044 *pGCPhysMem = NIL_RTGCPHYS;
5045 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5046 VERR_ACCESS_DENIED);
5047 }
5048 }
5049
5050 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
5051 *pGCPhysMem = GCPhys;
5052 return VINF_SUCCESS;
5053}
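/*
 * Note on the fast path above (illustrative summary): when the PTE has both
 * X86_PTE_RW and X86_PTE_US set and X86_PTE_PAE_NX clear, any read, write or
 * instruction fetch is permitted regardless of CPL and the detailed checks
 * are skipped.  Only when one of those bits differs do the write protection
 * (CR0.WP / ring-0), supervisor page and no-execute (EFER.NXE) checks apply.
 */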
5054
5055
5056
5057/**
5058 * Maps a physical page.
5059 *
5060 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
5061 * @param pIemCpu The IEM per CPU data.
5062 * @param GCPhysMem The physical address.
5063 * @param fAccess The intended access.
5064 * @param ppvMem Where to return the mapping address.
5065 * @param pLock The PGM lock.
5066 */
5067static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
5068{
5069#ifdef IEM_VERIFICATION_MODE_FULL
5070 /* Force the alternative path so we can ignore writes. */
5071 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
5072 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5073#endif
5074#ifdef IEM_LOG_MEMORY_WRITES
5075 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5076 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5077#endif
5078#ifdef IEM_VERIFICATION_MODE_MINIMAL
5079 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5080#endif
5081
5082 /** @todo This API may require some improving later. A private deal with PGM
5083 * regarding locking and unlocking needs to be struck. A couple of TLBs
5084 * living in PGM, but with publicly accessible inlined access methods
5085 * could perhaps be an even better solution. */
5086 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
5087 GCPhysMem,
5088 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
5089 pIemCpu->fBypassHandlers,
5090 ppvMem,
5091 pLock);
5092 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
5093 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5094 return rc;
5095}
5096
5097
5098/**
5099 * Unmaps a page previously mapped by iemMemPageMap.
5100 *
5101 * @param pIemCpu The IEM per CPU data.
5102 * @param GCPhysMem The physical address.
5103 * @param fAccess The intended access.
5104 * @param pvMem What iemMemPageMap returned.
5105 * @param pLock The PGM lock.
5106 */
5107DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
5108{
5109 NOREF(pIemCpu);
5110 NOREF(GCPhysMem);
5111 NOREF(fAccess);
5112 NOREF(pvMem);
5113 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
5114}
5115
5116
5117/**
5118 * Looks up a memory mapping entry.
5119 *
5120 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
5121 * @param pIemCpu The IEM per CPU data.
5122 * @param pvMem The memory address.
5123 * @param fAccess The access type to match.
5124 */
5125DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5126{
5127 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5128 if ( pIemCpu->aMemMappings[0].pv == pvMem
5129 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5130 return 0;
5131 if ( pIemCpu->aMemMappings[1].pv == pvMem
5132 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5133 return 1;
5134 if ( pIemCpu->aMemMappings[2].pv == pvMem
5135 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5136 return 2;
5137 return VERR_NOT_FOUND;
5138}
5139
5140
5141/**
5142 * Finds a free memmap entry when the iNextMapping hint doesn't work.
5143 *
5144 * @returns Memory mapping index, 1024 on failure.
5145 * @param pIemCpu The IEM per CPU data.
5146 */
5147static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
5148{
5149 /*
5150 * The easy case.
5151 */
5152 if (pIemCpu->cActiveMappings == 0)
5153 {
5154 pIemCpu->iNextMapping = 1;
5155 return 0;
5156 }
5157
5158 /* There should be enough mappings for all instructions. */
5159 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
5160
5161 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
5162 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5163 return i;
5164
5165 AssertFailedReturn(1024);
5166}
5167
5168
5169/**
5170 * Commits a bounce buffer that needs writing back and unmaps it.
5171 *
5172 * @returns Strict VBox status code.
5173 * @param pIemCpu The IEM per CPU data.
5174 * @param iMemMap The index of the buffer to commit.
5175 */
5176static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
5177{
5178 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5179 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5180
5181 /*
5182 * Do the writing.
5183 */
5184 int rc;
5185#ifndef IEM_VERIFICATION_MODE_MINIMAL
5186 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
5187 && !IEM_VERIFICATION_ENABLED(pIemCpu))
5188 {
5189 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5190 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5191 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5192 if (!pIemCpu->fBypassHandlers)
5193 {
5194 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5195 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5196 pbBuf,
5197 cbFirst);
5198 if (cbSecond && rc == VINF_SUCCESS)
5199 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5200 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5201 pbBuf + cbFirst,
5202 cbSecond);
5203 }
5204 else
5205 {
5206 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5207 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5208 pbBuf,
5209 cbFirst);
5210 if (cbSecond && rc == VINF_SUCCESS)
5211 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5212 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5213 pbBuf + cbFirst,
5214 cbSecond);
5215 }
5216 if (rc != VINF_SUCCESS)
5217 {
5218 /** @todo status code handling */
5219 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5220 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
5221 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5222 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5223 }
5224 }
5225 else
5226#endif
5227 rc = VINF_SUCCESS;
5228
5229#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5230 /*
5231 * Record the write(s).
5232 */
5233 if (!pIemCpu->fNoRem)
5234 {
5235 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5236 if (pEvtRec)
5237 {
5238 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5239 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
5240 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5241 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
5242 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
5243 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5244 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5245 }
5246 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5247 {
5248 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5249 if (pEvtRec)
5250 {
5251 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5252 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
5253 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5254 memcpy(pEvtRec->u.RamWrite.ab,
5255 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
5256 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
5257 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5258 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5259 }
5260 }
5261 }
5262#endif
5263#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
5264 if (rc == VINF_SUCCESS)
5265 {
5266 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5267 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
5268 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5269 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5270 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
5271 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
5272
5273 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5274 g_cbIemWrote = cbWrote;
5275 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5276 }
5277#endif
5278
5279 /*
5280 * Free the mapping entry.
5281 */
5282 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5283 Assert(pIemCpu->cActiveMappings != 0);
5284 pIemCpu->cActiveMappings--;
5285 return rc;
5286}
5287
5288
5289/**
5290 * iemMemMap worker that deals with a request crossing pages.
5291 */
5292static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
5293 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5294{
5295 /*
5296 * Do the address translations.
5297 */
5298 RTGCPHYS GCPhysFirst;
5299 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5300 if (rcStrict != VINF_SUCCESS)
5301 return rcStrict;
5302
5303/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
5304 * last byte. */
5305 RTGCPHYS GCPhysSecond;
5306 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
5307 if (rcStrict != VINF_SUCCESS)
5308 return rcStrict;
5309 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
5310
5311 /*
5312 * Read in the current memory content if it's a read, execute or partial
5313 * write access.
5314 */
5315 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5316 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
5317 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5318
5319 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5320 {
5321 int rc;
5322 if (!pIemCpu->fBypassHandlers)
5323 {
5324 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
5325 if (rc != VINF_SUCCESS)
5326 {
5327 /** @todo status code handling */
5328 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5329 return rc;
5330 }
5331 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5332 if (rc != VINF_SUCCESS)
5333 {
5334 /** @todo status code handling */
5335 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5336 return rc;
5337 }
5338 }
5339 else
5340 {
5341 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5342 if (rc != VINF_SUCCESS)
5343 {
5344 /** @todo status code handling */
5345 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5346 return rc;
5347 }
5348 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5349 if (rc != VINF_SUCCESS)
5350 {
5351 /** @todo status code handling */
5352 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5353 return rc;
5354 }
5355 }
5356
5357#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5358 if ( !pIemCpu->fNoRem
5359 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5360 {
5361 /*
5362 * Record the reads.
5363 */
5364 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5365 if (pEvtRec)
5366 {
5367 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5368 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5369 pEvtRec->u.RamRead.cb = cbFirstPage;
5370 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5371 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5372 }
5373 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5374 if (pEvtRec)
5375 {
5376 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5377 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5378 pEvtRec->u.RamRead.cb = cbSecondPage;
5379 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5380 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5381 }
5382 }
5383#endif
5384 }
5385#ifdef VBOX_STRICT
5386 else
5387 memset(pbBuf, 0xcc, cbMem);
5388#endif
5389#ifdef VBOX_STRICT
5390 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5391 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5392#endif
5393
5394 /*
5395 * Commit the bounce buffer entry.
5396 */
5397 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5398 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5399 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5400 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5401 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5402 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5403 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5404 pIemCpu->cActiveMappings++;
5405
5406 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5407 *ppvMem = pbBuf;
5408 return VINF_SUCCESS;
5409}
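/*
 * Worked example of the split above (illustrative only): a 4 byte access at
 * GCPtrFirst = 0x00080ffe is translated once for the first byte and once for
 * the last byte; with 4 KB pages cbFirstPage = 0x1000 - 0xffe = 2 and
 * cbSecondPage = 4 - 2 = 2, so two bytes are bounce buffered from the end of
 * the first physical page and two from the start of the second.
 */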
5410
5411
5412/**
5413 * iemMemMap worker that deals with iemMemPageMap failures.
5414 */
5415static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5416 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5417{
5418 /*
5419 * Filter out conditions we can handle and the ones which shouldn't happen.
5420 */
5421 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5422 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5423 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5424 {
5425 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5426 return rcMap;
5427 }
5428 pIemCpu->cPotentialExits++;
5429
5430 /*
5431 * Read in the current memory content if it's a read, execute or partial
5432 * write access.
5433 */
5434 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5435 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5436 {
5437 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5438 memset(pbBuf, 0xff, cbMem);
5439 else
5440 {
5441 int rc;
5442 if (!pIemCpu->fBypassHandlers)
5443 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5444 else
5445 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5446 if (rc != VINF_SUCCESS)
5447 {
5448 /** @todo status code handling */
5449 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5450 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5451 return rc;
5452 }
5453 }
5454
5455#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5456 if ( !pIemCpu->fNoRem
5457 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5458 {
5459 /*
5460 * Record the read.
5461 */
5462 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5463 if (pEvtRec)
5464 {
5465 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5466 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5467 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5468 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5469 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5470 }
5471 }
5472#endif
5473 }
5474#ifdef VBOX_STRICT
5475 else
5476 memset(pbBuf, 0xcc, cbMem);
5477#endif
5478#ifdef VBOX_STRICT
5479 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5480 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5481#endif
5482
5483 /*
5484 * Commit the bounce buffer entry.
5485 */
5486 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5487 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5488 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5489 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5490 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5491 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5492 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5493 pIemCpu->cActiveMappings++;
5494
5495 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5496 *ppvMem = pbBuf;
5497 return VINF_SUCCESS;
5498}
5499
5500
5501
5502/**
5503 * Maps the specified guest memory for the given kind of access.
5504 *
5505 * This may be using bounce buffering of the memory if it's crossing a page
5506 * boundary or if there is an access handler installed for any of it. Because
5507 * of lock prefix guarantees, we're in for some extra clutter when this
5508 * happens.
5509 *
5510 * This may raise a \#GP, \#SS, \#PF or \#AC.
5511 *
5512 * @returns VBox strict status code.
5513 *
5514 * @param pIemCpu The IEM per CPU data.
5515 * @param ppvMem Where to return the pointer to the mapped
5516 * memory.
5517 * @param cbMem The number of bytes to map. This is usually 1,
5518 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5519 * string operations it can be up to a page.
5520 * @param iSegReg The index of the segment register to use for
5521 * this access. The base and limits are checked.
5522 * Use UINT8_MAX to indicate that no segmentation
5523 * is required (for IDT, GDT and LDT accesses).
5524 * @param GCPtrMem The address of the guest memory.
5525 * @param fAccess How the memory is being accessed. The
5526 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5527 * how to map the memory, while the
5528 * IEM_ACCESS_WHAT_XXX bit is used when raising
5529 * exceptions.
5530 */
5531static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5532{
5533 /*
5534 * Check the input and figure out which mapping entry to use.
5535 */
5536 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */
5537 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK))); /* no stray access bits */
5538
5539 unsigned iMemMap = pIemCpu->iNextMapping;
5540 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5541 {
5542 iMemMap = iemMemMapFindFree(pIemCpu);
5543 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5544 }
5545
5546 /*
5547 * Map the memory, checking that we can actually access it. If something
5548 * slightly complicated happens, fall back on bounce buffering.
5549 */
5550 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5551 if (rcStrict != VINF_SUCCESS)
5552 return rcStrict;
5553
5554 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5555 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5556
5557 RTGCPHYS GCPhysFirst;
5558 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5559 if (rcStrict != VINF_SUCCESS)
5560 return rcStrict;
5561
5562 void *pvMem;
5563 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5564 if (rcStrict != VINF_SUCCESS)
5565 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5566
5567 /*
5568 * Fill in the mapping table entry.
5569 */
5570 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5571 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5572 pIemCpu->iNextMapping = iMemMap + 1;
5573 pIemCpu->cActiveMappings++;
5574
5575 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5576 *ppvMem = pvMem;
5577 return VINF_SUCCESS;
5578}
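/*
 * Minimal usage sketch for iemMemMap / iemMemCommitAndUnmap (illustrative
 * only; it mirrors what the data fetch and store helpers below do, with
 * pIemCpu, GCPtrMem and u16Value standing in for caller state).  The mapping
 * may be direct or bounce buffered, so the commit call is what makes a write
 * visible and the pointer must not be touched afterwards.
 */
#if 0 /* illustrative sketch, not compiled */
    uint16_t       *pu16Dst;
    VBOXSTRICTRC    rcStrict = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
                                         X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
#endif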
5579
5580
5581/**
5582 * Commits the guest memory if bounce buffered and unmaps it.
5583 *
5584 * @returns Strict VBox status code.
5585 * @param pIemCpu The IEM per CPU data.
5586 * @param pvMem The mapping.
5587 * @param fAccess The kind of access.
5588 */
5589static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5590{
5591 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5592 AssertReturn(iMemMap >= 0, iMemMap);
5593
5594 /* If it's bounce buffered, we may need to write back the buffer. */
5595 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5596 {
5597 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5598 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5599 }
5600 /* Otherwise unlock it. */
5601 else
5602 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5603
5604 /* Free the entry. */
5605 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5606 Assert(pIemCpu->cActiveMappings != 0);
5607 pIemCpu->cActiveMappings--;
5608 return VINF_SUCCESS;
5609}
5610
5611
5612/**
5613 * Fetches a data byte.
5614 *
5615 * @returns Strict VBox status code.
5616 * @param pIemCpu The IEM per CPU data.
5617 * @param pu8Dst Where to return the byte.
5618 * @param iSegReg The index of the segment register to use for
5619 * this access. The base and limits are checked.
5620 * @param GCPtrMem The address of the guest memory.
5621 */
5622static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5623{
5624 /* The lazy approach for now... */
5625 uint8_t const *pu8Src;
5626 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5627 if (rc == VINF_SUCCESS)
5628 {
5629 *pu8Dst = *pu8Src;
5630 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5631 }
5632 return rc;
5633}
5634
5635
5636/**
5637 * Fetches a data word.
5638 *
5639 * @returns Strict VBox status code.
5640 * @param pIemCpu The IEM per CPU data.
5641 * @param pu16Dst Where to return the word.
5642 * @param iSegReg The index of the segment register to use for
5643 * this access. The base and limits are checked.
5644 * @param GCPtrMem The address of the guest memory.
5645 */
5646static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5647{
5648 /* The lazy approach for now... */
5649 uint16_t const *pu16Src;
5650 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5651 if (rc == VINF_SUCCESS)
5652 {
5653 *pu16Dst = *pu16Src;
5654 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5655 }
5656 return rc;
5657}
5658
5659
5660/**
5661 * Fetches a data dword.
5662 *
5663 * @returns Strict VBox status code.
5664 * @param pIemCpu The IEM per CPU data.
5665 * @param pu32Dst Where to return the dword.
5666 * @param iSegReg The index of the segment register to use for
5667 * this access. The base and limits are checked.
5668 * @param GCPtrMem The address of the guest memory.
5669 */
5670static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5671{
5672 /* The lazy approach for now... */
5673 uint32_t const *pu32Src;
5674 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5675 if (rc == VINF_SUCCESS)
5676 {
5677 *pu32Dst = *pu32Src;
5678 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5679 }
5680 return rc;
5681}
5682
5683
5684#ifdef SOME_UNUSED_FUNCTION
5685/**
5686 * Fetches a data dword and sign extends it to a qword.
5687 *
5688 * @returns Strict VBox status code.
5689 * @param pIemCpu The IEM per CPU data.
5690 * @param pu64Dst Where to return the sign extended value.
5691 * @param iSegReg The index of the segment register to use for
5692 * this access. The base and limits are checked.
5693 * @param GCPtrMem The address of the guest memory.
5694 */
5695static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5696{
5697 /* The lazy approach for now... */
5698 int32_t const *pi32Src;
5699 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5700 if (rc == VINF_SUCCESS)
5701 {
5702 *pu64Dst = *pi32Src;
5703 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5704 }
5705#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5706 else
5707 *pu64Dst = 0;
5708#endif
5709 return rc;
5710}
5711#endif
5712
5713
5714/**
5715 * Fetches a data qword.
5716 *
5717 * @returns Strict VBox status code.
5718 * @param pIemCpu The IEM per CPU data.
5719 * @param pu64Dst Where to return the qword.
5720 * @param iSegReg The index of the segment register to use for
5721 * this access. The base and limits are checked.
5722 * @param GCPtrMem The address of the guest memory.
5723 */
5724static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5725{
5726 /* The lazy approach for now... */
5727 uint64_t const *pu64Src;
5728 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5729 if (rc == VINF_SUCCESS)
5730 {
5731 *pu64Dst = *pu64Src;
5732 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5733 }
5734 return rc;
5735}
5736
5737
5738/**
5739 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
5740 *
5741 * @returns Strict VBox status code.
5742 * @param pIemCpu The IEM per CPU data.
5743 * @param pu64Dst Where to return the qword.
5744 * @param iSegReg The index of the segment register to use for
5745 * this access. The base and limits are checked.
5746 * @param GCPtrMem The address of the guest memory.
5747 */
5748static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5749{
5750 /* The lazy approach for now... */
5751 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
5752 if (RT_UNLIKELY(GCPtrMem & 15))
5753 return iemRaiseGeneralProtectionFault0(pIemCpu);
5754
5755 uint64_t const *pu64Src;
5756 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5757 if (rc == VINF_SUCCESS)
5758 {
5759 *pu64Dst = *pu64Src;
5760 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5761 }
5762 return rc;
5763}
5764
5765
5766/**
5767 * Fetches a data tword.
5768 *
5769 * @returns Strict VBox status code.
5770 * @param pIemCpu The IEM per CPU data.
5771 * @param pr80Dst Where to return the tword.
5772 * @param iSegReg The index of the segment register to use for
5773 * this access. The base and limits are checked.
5774 * @param GCPtrMem The address of the guest memory.
5775 */
5776static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5777{
5778 /* The lazy approach for now... */
5779 PCRTFLOAT80U pr80Src;
5780 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5781 if (rc == VINF_SUCCESS)
5782 {
5783 *pr80Dst = *pr80Src;
5784 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5785 }
5786 return rc;
5787}
5788
5789
5790/**
5791 * Fetches a data dqword (double qword), generally SSE related.
5792 *
5793 * @returns Strict VBox status code.
5794 * @param pIemCpu The IEM per CPU data.
5795 * @param pu128Dst Where to return the dqword.
5796 * @param iSegReg The index of the segment register to use for
5797 * this access. The base and limits are checked.
5798 * @param GCPtrMem The address of the guest memory.
5799 */
5800static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5801{
5802 /* The lazy approach for now... */
5803 uint128_t const *pu128Src;
5804 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5805 if (rc == VINF_SUCCESS)
5806 {
5807 *pu128Dst = *pu128Src;
5808 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
5809 }
5810 return rc;
5811}
5812
5813
5814/**
5815 * Fetches a data dqword (double qword) at an aligned address, generally SSE
5816 * related.
5817 *
5818 * Raises GP(0) if not aligned.
5819 *
5820 * @returns Strict VBox status code.
5821 * @param pIemCpu The IEM per CPU data.
5822 * @param pu128Dst Where to return the dqword.
5823 * @param iSegReg The index of the segment register to use for
5824 * this access. The base and limits are checked.
5825 * @param GCPtrMem The address of the guest memory.
5826 */
5827static VBOXSTRICTRC iemMemFetchDataU128Aligned(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5828{
5829 /* The lazy approach for now... */
5830 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
5831 if (RT_UNLIKELY(GCPtrMem & 15))
5832 return iemRaiseGeneralProtectionFault0(pIemCpu);
5833
5834 uint128_t const *pu128Src;
5835 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5836 if (rc == VINF_SUCCESS)
5837 {
5838 *pu128Dst = *pu128Src;
5839 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
5840 }
5841 return rc;
5842}
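/*
 * Alignment note (illustrative): the GCPtrMem & 15 test above rejects any
 * address that is not on a 16 byte boundary, e.g. 0x1008 & 15 = 8 raises
 * #GP(0) whereas 0x1010 & 15 = 0 is accepted, matching the behaviour of
 * aligned SSE loads such as movdqa / movaps.
 */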
5843
5844
5845
5846
5847/**
5848 * Fetches a descriptor register (lgdt, lidt).
5849 *
5850 * @returns Strict VBox status code.
5851 * @param pIemCpu The IEM per CPU data.
5852 * @param pcbLimit Where to return the limit.
5853 * @param pGCPtrBase Where to return the base.
5854 * @param iSegReg The index of the segment register to use for
5855 * this access. The base and limits are checked.
5856 * @param GCPtrMem The address of the guest memory.
5857 * @param enmOpSize The effective operand size.
5858 */
5859static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5860 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5861{
5862 uint8_t const *pu8Src;
5863 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5864 (void **)&pu8Src,
5865 enmOpSize == IEMMODE_64BIT
5866 ? 2 + 8
5867 : enmOpSize == IEMMODE_32BIT
5868 ? 2 + 4
5869 : 2 + 3,
5870 iSegReg,
5871 GCPtrMem,
5872 IEM_ACCESS_DATA_R);
5873 if (rcStrict == VINF_SUCCESS)
5874 {
5875 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5876 switch (enmOpSize)
5877 {
5878 case IEMMODE_16BIT:
5879 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5880 break;
5881 case IEMMODE_32BIT:
5882 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5883 break;
5884 case IEMMODE_64BIT:
5885 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5886 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5887 break;
5888
5889 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5890 }
5891 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5892 }
5893 return rcStrict;
5894}
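/*
 * Layout of the descriptor table operand read above (illustrative):
 *      bytes 0..1  limit (always 16 bits)
 *      bytes 2..4  base, low 24 bits   - 16-bit operand size
 *      bytes 2..5  base, 32 bits       - 32-bit operand size
 *      bytes 2..9  base, 64 bits       - 64-bit operand size
 * which is why the mapping size is 2 + 3, 2 + 4 or 2 + 8 bytes.
 */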
5895
5896
5897
5898/**
5899 * Stores a data byte.
5900 *
5901 * @returns Strict VBox status code.
5902 * @param pIemCpu The IEM per CPU data.
5903 * @param iSegReg The index of the segment register to use for
5904 * this access. The base and limits are checked.
5905 * @param GCPtrMem The address of the guest memory.
5906 * @param u8Value The value to store.
5907 */
5908static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5909{
5910 /* The lazy approach for now... */
5911 uint8_t *pu8Dst;
5912 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5913 if (rc == VINF_SUCCESS)
5914 {
5915 *pu8Dst = u8Value;
5916 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5917 }
5918 return rc;
5919}
5920
5921
5922/**
5923 * Stores a data word.
5924 *
5925 * @returns Strict VBox status code.
5926 * @param pIemCpu The IEM per CPU data.
5927 * @param iSegReg The index of the segment register to use for
5928 * this access. The base and limits are checked.
5929 * @param GCPtrMem The address of the guest memory.
5930 * @param u16Value The value to store.
5931 */
5932static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5933{
5934 /* The lazy approach for now... */
5935 uint16_t *pu16Dst;
5936 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5937 if (rc == VINF_SUCCESS)
5938 {
5939 *pu16Dst = u16Value;
5940 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5941 }
5942 return rc;
5943}
5944
5945
5946/**
5947 * Stores a data dword.
5948 *
5949 * @returns Strict VBox status code.
5950 * @param pIemCpu The IEM per CPU data.
5951 * @param iSegReg The index of the segment register to use for
5952 * this access. The base and limits are checked.
5953 * @param GCPtrMem The address of the guest memory.
5954 * @param u32Value The value to store.
5955 */
5956static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5957{
5958 /* The lazy approach for now... */
5959 uint32_t *pu32Dst;
5960 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5961 if (rc == VINF_SUCCESS)
5962 {
5963 *pu32Dst = u32Value;
5964 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5965 }
5966 return rc;
5967}
5968
5969
5970/**
5971 * Stores a data qword.
5972 *
5973 * @returns Strict VBox status code.
5974 * @param pIemCpu The IEM per CPU data.
5975 * @param iSegReg The index of the segment register to use for
5976 * this access. The base and limits are checked.
5977 * @param GCPtrMem The address of the guest memory.
5978 * @param u64Value The value to store.
5979 */
5980static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5981{
5982 /* The lazy approach for now... */
5983 uint64_t *pu64Dst;
5984 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5985 if (rc == VINF_SUCCESS)
5986 {
5987 *pu64Dst = u64Value;
5988 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5989 }
5990 return rc;
5991}
5992
5993
5994/**
5995 * Stores a data dqword.
5996 *
5997 * @returns Strict VBox status code.
5998 * @param pIemCpu The IEM per CPU data.
5999 * @param iSegReg The index of the segment register to use for
6000 * this access. The base and limits are checked.
6001 * @param GCPtrMem The address of the guest memory.
6002 * @param u128Value The value to store.
6003 */
6004static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6005{
6006 /* The lazy approach for now... */
6007 uint128_t *pu128Dst;
6008 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6009 if (rc == VINF_SUCCESS)
6010 {
6011 *pu128Dst = u128Value;
6012 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6013 }
6014 return rc;
6015}
6016
6017
6018/**
6019 * Stores a data dqword, aligned.
6020 *
6021 * @returns Strict VBox status code.
6022 * @param pIemCpu The IEM per CPU data.
6023 * @param iSegReg The index of the segment register to use for
6024 * this access. The base and limits are checked.
6025 * @param GCPtrMem The address of the guest memory.
6026 * @param u128Value The value to store.
6027 */
6028static VBOXSTRICTRC iemMemStoreDataU128Aligned(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6029{
6030 /* The lazy approach for now... */
6031 if (GCPtrMem & 15)
6032 return iemRaiseGeneralProtectionFault0(pIemCpu);
6033
6034 uint128_t *pu128Dst;
6035 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6036 if (rc == VINF_SUCCESS)
6037 {
6038 *pu128Dst = u128Value;
6039 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6040 }
6041 return rc;
6042}
6043
6044
6045/**
6046 * Stores a descriptor register (sgdt, sidt).
6047 *
6048 * @returns Strict VBox status code.
6049 * @param pIemCpu The IEM per CPU data.
6050 * @param cbLimit The limit.
6051 * @param GCPtrBase The base address.
6052 * @param iSegReg The index of the segment register to use for
6053 * this access. The base and limits are checked.
6054 * @param GCPtrMem The address of the guest memory.
6055 * @param enmOpSize The effective operand size.
6056 */
6057static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
6058 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
6059{
6060 uint8_t *pu8Src;
6061 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
6062 (void **)&pu8Src,
6063 enmOpSize == IEMMODE_64BIT
6064 ? 2 + 8
6065 : enmOpSize == IEMMODE_32BIT
6066 ? 2 + 4
6067 : 2 + 3,
6068 iSegReg,
6069 GCPtrMem,
6070 IEM_ACCESS_DATA_W);
6071 if (rcStrict == VINF_SUCCESS)
6072 {
6073 pu8Src[0] = RT_BYTE1(cbLimit);
6074 pu8Src[1] = RT_BYTE2(cbLimit);
6075 pu8Src[2] = RT_BYTE1(GCPtrBase);
6076 pu8Src[3] = RT_BYTE2(GCPtrBase);
6077 pu8Src[4] = RT_BYTE3(GCPtrBase);
6078 if (enmOpSize == IEMMODE_16BIT)
6079 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
6080 else
6081 {
6082 pu8Src[5] = RT_BYTE4(GCPtrBase);
6083 if (enmOpSize == IEMMODE_64BIT)
6084 {
6085 pu8Src[6] = RT_BYTE5(GCPtrBase);
6086 pu8Src[7] = RT_BYTE6(GCPtrBase);
6087 pu8Src[8] = RT_BYTE7(GCPtrBase);
6088 pu8Src[9] = RT_BYTE8(GCPtrBase);
6089 }
6090 }
6091 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
6092 }
6093 return rcStrict;
6094}
6095
6096
6097/**
6098 * Pushes a word onto the stack.
6099 *
6100 * @returns Strict VBox status code.
6101 * @param pIemCpu The IEM per CPU data.
6102 * @param u16Value The value to push.
6103 */
6104static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
6105{
6106 /* Decrement the stack pointer. */
6107 uint64_t uNewRsp;
6108 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6109 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
6110
6111 /* Write the word the lazy way. */
6112 uint16_t *pu16Dst;
6113 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6114 if (rc == VINF_SUCCESS)
6115 {
6116 *pu16Dst = u16Value;
6117 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6118 }
6119
6120 /* Commit the new RSP value unless an access handler made trouble. */
6121 if (rc == VINF_SUCCESS)
6122 pCtx->rsp = uNewRsp;
6123
6124 return rc;
6125}
6126
6127
6128/**
6129 * Pushes a dword onto the stack.
6130 *
6131 * @returns Strict VBox status code.
6132 * @param pIemCpu The IEM per CPU data.
6133 * @param u32Value The value to push.
6134 */
6135static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
6136{
6137 /* Decrement the stack pointer. */
6138 uint64_t uNewRsp;
6139 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6140 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
6141
6142 /* Write the dword the lazy way. */
6143 uint32_t *pu32Dst;
6144 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6145 if (rc == VINF_SUCCESS)
6146 {
6147 *pu32Dst = u32Value;
6148 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6149 }
6150
6151 /* Commit the new RSP value unless an access handler made trouble. */
6152 if (rc == VINF_SUCCESS)
6153 pCtx->rsp = uNewRsp;
6154
6155 return rc;
6156}
6157
6158
6159/**
6160 * Pushes a qword onto the stack.
6161 *
6162 * @returns Strict VBox status code.
6163 * @param pIemCpu The IEM per CPU data.
6164 * @param u64Value The value to push.
6165 */
6166static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
6167{
6168 /* Decrement the stack pointer. */
6169 uint64_t uNewRsp;
6170 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6171 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
6172
6173 /* Write the qword the lazy way. */
6174 uint64_t *pu64Dst;
6175 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6176 if (rc == VINF_SUCCESS)
6177 {
6178 *pu64Dst = u64Value;
6179 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6180 }
6181
6182 /* Commit the new RSP value unless an access handler made trouble. */
6183 if (rc == VINF_SUCCESS)
6184 pCtx->rsp = uNewRsp;
6185
6186 return rc;
6187}
6188
6189
6190/**
6191 * Pops a word from the stack.
6192 *
6193 * @returns Strict VBox status code.
6194 * @param pIemCpu The IEM per CPU data.
6195 * @param pu16Value Where to store the popped value.
6196 */
6197static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
6198{
6199 /* Increment the stack pointer. */
6200 uint64_t uNewRsp;
6201 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6202 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
6203
6204 /* Read the word the lazy way. */
6205 uint16_t const *pu16Src;
6206 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6207 if (rc == VINF_SUCCESS)
6208 {
6209 *pu16Value = *pu16Src;
6210 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6211
6212 /* Commit the new RSP value. */
6213 if (rc == VINF_SUCCESS)
6214 pCtx->rsp = uNewRsp;
6215 }
6216
6217 return rc;
6218}
6219
6220
6221/**
6222 * Pops a dword from the stack.
6223 *
6224 * @returns Strict VBox status code.
6225 * @param pIemCpu The IEM per CPU data.
6226 * @param pu32Value Where to store the popped value.
6227 */
6228static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
6229{
6230 /* Increment the stack pointer. */
6231 uint64_t uNewRsp;
6232 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6233 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
6234
6235 /* Read the dword the lazy way. */
6236 uint32_t const *pu32Src;
6237 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6238 if (rc == VINF_SUCCESS)
6239 {
6240 *pu32Value = *pu32Src;
6241 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6242
6243 /* Commit the new RSP value. */
6244 if (rc == VINF_SUCCESS)
6245 pCtx->rsp = uNewRsp;
6246 }
6247
6248 return rc;
6249}
6250
6251
6252/**
6253 * Pops a qword from the stack.
6254 *
6255 * @returns Strict VBox status code.
6256 * @param pIemCpu The IEM per CPU data.
6257 * @param pu64Value Where to store the popped value.
6258 */
6259static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
6260{
6261 /* Increment the stack pointer. */
6262 uint64_t uNewRsp;
6263 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6264 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
6265
6266 /* Read the qword the lazy way. */
6267 uint64_t const *pu64Src;
6268 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6269 if (rc == VINF_SUCCESS)
6270 {
6271 *pu64Value = *pu64Src;
6272 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6273
6274 /* Commit the new RSP value. */
6275 if (rc == VINF_SUCCESS)
6276 pCtx->rsp = uNewRsp;
6277 }
6278
6279 return rc;
6280}
6281
6282
6283/**
6284 * Pushes a word onto the stack, using a temporary stack pointer.
6285 *
6286 * @returns Strict VBox status code.
6287 * @param pIemCpu The IEM per CPU data.
6288 * @param u16Value The value to push.
6289 * @param pTmpRsp Pointer to the temporary stack pointer.
6290 */
6291static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
6292{
6293 /* Decrement the stack pointer. */
6294 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6295 RTUINT64U NewRsp = *pTmpRsp;
6296 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
6297
6298 /* Write the word the lazy way. */
6299 uint16_t *pu16Dst;
6300 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6301 if (rc == VINF_SUCCESS)
6302 {
6303 *pu16Dst = u16Value;
6304 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6305 }
6306
6307 /* Commit the new RSP value unless an access handler made trouble. */
6308 if (rc == VINF_SUCCESS)
6309 *pTmpRsp = NewRsp;
6310
6311 return rc;
6312}
6313
6314
6315/**
6316 * Pushes a dword onto the stack, using a temporary stack pointer.
6317 *
6318 * @returns Strict VBox status code.
6319 * @param pIemCpu The IEM per CPU data.
6320 * @param u32Value The value to push.
6321 * @param pTmpRsp Pointer to the temporary stack pointer.
6322 */
6323static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
6324{
6325 /* Decrement the stack pointer. */
6326 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6327 RTUINT64U NewRsp = *pTmpRsp;
6328 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
6329
6330 /* Write the dword the lazy way. */
6331 uint32_t *pu32Dst;
6332 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6333 if (rc == VINF_SUCCESS)
6334 {
6335 *pu32Dst = u32Value;
6336 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6337 }
6338
6339 /* Commit the new RSP value unless an access handler made trouble. */
6340 if (rc == VINF_SUCCESS)
6341 *pTmpRsp = NewRsp;
6342
6343 return rc;
6344}
6345
6346
6347/**
6348 * Pushes a qword onto the stack, using a temporary stack pointer.
6349 *
6350 * @returns Strict VBox status code.
6351 * @param pIemCpu The IEM per CPU data.
6352 * @param u64Value The value to push.
6353 * @param pTmpRsp Pointer to the temporary stack pointer.
6354 */
6355static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
6356{
6357 /* Decrement the stack pointer. */
6358 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6359 RTUINT64U NewRsp = *pTmpRsp;
6360 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
6361
6362 /* Write the qword the lazy way. */
6363 uint64_t *pu64Dst;
6364 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6365 if (rc == VINF_SUCCESS)
6366 {
6367 *pu64Dst = u64Value;
6368 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6369 }
6370
6371 /* Commit the new RSP value unless an access handler made trouble. */
6372 if (rc == VINF_SUCCESS)
6373 *pTmpRsp = NewRsp;
6374
6375 return rc;
6376}
6377
6378
6379/**
6380 * Pops a word from the stack, using a temporary stack pointer.
6381 *
6382 * @returns Strict VBox status code.
6383 * @param pIemCpu The IEM per CPU data.
6384 * @param pu16Value Where to store the popped value.
6385 * @param pTmpRsp Pointer to the temporary stack pointer.
6386 */
6387static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
6388{
6389 /* Increment the stack pointer. */
6390 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6391 RTUINT64U NewRsp = *pTmpRsp;
6392 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
6393
6394 /* Read the word the lazy way. */
6395 uint16_t const *pu16Src;
6396 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6397 if (rc == VINF_SUCCESS)
6398 {
6399 *pu16Value = *pu16Src;
6400 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6401
6402 /* Commit the new RSP value. */
6403 if (rc == VINF_SUCCESS)
6404 *pTmpRsp = NewRsp;
6405 }
6406
6407 return rc;
6408}
6409
6410
6411/**
6412 * Pops a dword from the stack, using a temporary stack pointer.
6413 *
6414 * @returns Strict VBox status code.
6415 * @param pIemCpu The IEM per CPU data.
6416 * @param pu32Value Where to store the popped value.
6417 * @param pTmpRsp Pointer to the temporary stack pointer.
6418 */
6419static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
6420{
6421 /* Increment the stack pointer. */
6422 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6423 RTUINT64U NewRsp = *pTmpRsp;
6424 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
6425
6426 /* Read the dword the lazy way. */
6427 uint32_t const *pu32Src;
6428 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6429 if (rc == VINF_SUCCESS)
6430 {
6431 *pu32Value = *pu32Src;
6432 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6433
6434 /* Commit the new RSP value. */
6435 if (rc == VINF_SUCCESS)
6436 *pTmpRsp = NewRsp;
6437 }
6438
6439 return rc;
6440}
6441
6442
6443/**
6444 * Pops a qword from the stack, using a temporary stack pointer.
6445 *
6446 * @returns Strict VBox status code.
6447 * @param pIemCpu The IEM per CPU data.
6448 * @param pu64Value Where to store the popped value.
6449 * @param pTmpRsp Pointer to the temporary stack pointer.
6450 */
6451static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
6452{
6453 /* Increment the stack pointer. */
6454 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6455 RTUINT64U NewRsp = *pTmpRsp;
6456 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6457
6458 /* Read the qword the lazy way. */
6459 uint64_t const *pu64Src;
6460 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6461 if (rcStrict == VINF_SUCCESS)
6462 {
6463 *pu64Value = *pu64Src;
6464 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6465
6466 /* Commit the new RSP value. */
6467 if (rcStrict == VINF_SUCCESS)
6468 *pTmpRsp = NewRsp;
6469 }
6470
6471 return rcStrict;
6472}
6473
6474
6475/**
6476 * Begin a special stack push (used by interrupts, exceptions and such).
6477 *
6478 * This will raise \#SS or \#PF if appropriate.
6479 *
6480 * @returns Strict VBox status code.
6481 * @param pIemCpu The IEM per CPU data.
6482 * @param cbMem The number of bytes to push onto the stack.
6483 * @param ppvMem Where to return the pointer to the stack memory.
6484 * As with the other memory functions this could be
6485 * direct access or bounce buffered access, so
6486 * don't commit the register values until the commit call
6487 * succeeds.
6488 * @param puNewRsp Where to return the new RSP value. This must be
6489 * passed unchanged to
6490 * iemMemStackPushCommitSpecial().
6491 */
6492static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6493{
6494 Assert(cbMem < UINT8_MAX);
6495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6496 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6497 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6498}
6499
6500
6501/**
6502 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6503 *
6504 * This will update the rSP.
6505 *
6506 * @returns Strict VBox status code.
6507 * @param pIemCpu The IEM per CPU data.
6508 * @param pvMem The pointer returned by
6509 * iemMemStackPushBeginSpecial().
6510 * @param uNewRsp The new RSP value returned by
6511 * iemMemStackPushBeginSpecial().
6512 */
6513static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6514{
6515 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6516 if (rcStrict == VINF_SUCCESS)
6517 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6518 return rcStrict;
6519}
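/*
 * Minimal usage sketch for the special stack push pair (illustrative only;
 * roughly the pattern the event injection code uses, with uFlags, uCs and
 * uIp standing in for caller state): reserve the space, fill in the frame
 * through the returned pointer, then commit memory and RSP together.
 */
#if 0 /* illustrative sketch, not compiled */
    uint64_t        uNewRsp;
    uint16_t       *pau16Frame;
    VBOXSTRICTRC    rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        pau16Frame[2] = uFlags;  /* hypothetical real-mode style frame: flags, CS, IP */
        pau16Frame[1] = uCs;
        pau16Frame[0] = uIp;
        rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pau16Frame, uNewRsp);
    }
#endif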
6520
6521
6522/**
6523 * Begin a special stack pop (used by iret, retf and such).
6524 *
6525 * This will raise \#SS or \#PF if appropriate.
6526 *
6527 * @returns Strict VBox status code.
6528 * @param pIemCpu The IEM per CPU data.
6529 * @param cbMem The number of bytes to pop off the stack.
6530 * @param ppvMem Where to return the pointer to the stack memory.
6531 * @param puNewRsp Where to return the new RSP value. This must be
6532 * passed unchanged to
6533 * iemMemStackPopCommitSpecial() or applied
6534 * manually if iemMemStackPopDoneSpecial() is used.
6535 */
6536static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6537{
6538 Assert(cbMem < UINT8_MAX);
6539 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6540 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6541 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6542}
6543
6544
6545/**
6546 * Continue a special stack pop (used by iret and retf).
6547 *
6548 * This will raise \#SS or \#PF if appropriate.
6549 *
6550 * @returns Strict VBox status code.
6551 * @param pIemCpu The IEM per CPU data.
6552 * @param cbMem The number of bytes to pop off the stack.
6553 * @param ppvMem Where to return the pointer to the stack memory.
6554 * @param puNewRsp Where to return the new RSP value. This must be
6555 * passed unchanged to
6556 * iemMemStackPopCommitSpecial() or applied
6557 * manually if iemMemStackPopDoneSpecial() is used.
6558 */
6559static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6560{
6561 Assert(cbMem < UINT8_MAX);
6562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6563 RTUINT64U NewRsp;
6564 NewRsp.u = *puNewRsp;
6565 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6566 *puNewRsp = NewRsp.u;
6567 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6568}
6569
6570
6571/**
6572 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6573 *
6574 * This will update the rSP.
6575 *
6576 * @returns Strict VBox status code.
6577 * @param pIemCpu The IEM per CPU data.
6578 * @param pvMem The pointer returned by
6579 * iemMemStackPopBeginSpecial().
6580 * @param uNewRsp The new RSP value returned by
6581 * iemMemStackPopBeginSpecial().
6582 */
6583static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6584{
6585 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6586 if (rcStrict == VINF_SUCCESS)
6587 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6588 return rcStrict;
6589}
6590
6591
6592/**
6593 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6594 * iemMemStackPopContinueSpecial).
6595 *
6596 * The caller will manually commit the rSP.
6597 *
6598 * @returns Strict VBox status code.
6599 * @param pIemCpu The IEM per CPU data.
6600 * @param pvMem The pointer returned by
6601 * iemMemStackPopBeginSpecial() or
6602 * iemMemStackPopContinueSpecial().
6603 */
6604static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6605{
6606 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6607}
6608
6609
6610/**
6611 * Fetches a system table dword.
6612 *
6613 * @returns Strict VBox status code.
6614 * @param pIemCpu The IEM per CPU data.
6615 * @param pu32Dst Where to return the dword.
6616 * @param iSegReg The index of the segment register to use for
6617 * this access. The base and limits are checked.
6618 * @param GCPtrMem The address of the guest memory.
6619 */
6620static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6621{
6622 /* The lazy approach for now... */
6623 uint32_t const *pu32Src;
6624 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6625 if (rc == VINF_SUCCESS)
6626 {
6627 *pu32Dst = *pu32Src;
6628 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6629 }
6630 return rc;
6631}
6632
6633
6634/**
6635 * Fetches a system table qword.
6636 *
6637 * @returns Strict VBox status code.
6638 * @param pIemCpu The IEM per CPU data.
6639 * @param pu64Dst Where to return the qword.
6640 * @param iSegReg The index of the segment register to use for
6641 * this access. The base and limits are checked.
6642 * @param GCPtrMem The address of the guest memory.
6643 */
6644static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6645{
6646 /* The lazy approach for now... */
6647 uint64_t const *pu64Src;
6648 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6649 if (rc == VINF_SUCCESS)
6650 {
6651 *pu64Dst = *pu64Src;
6652 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6653 }
6654 return rc;
6655}
6656
6657
6658/**
6659 * Fetches a descriptor table entry.
6660 *
6661 * @returns Strict VBox status code.
6662 * @param pIemCpu The IEM per CPU.
6663 * @param pDesc Where to return the descriptor table entry.
6664 * @param uSel The selector which table entry to fetch.
6665 */
6666static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6667{
6668 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6669
6670 /** @todo did the 286 require all 8 bytes to be accessible? */
6671 /*
6672 * Get the selector table base and check bounds.
6673 */
6674 RTGCPTR GCPtrBase;
6675 if (uSel & X86_SEL_LDT)
6676 {
6677 if ( !pCtx->ldtr.Attr.n.u1Present
6678 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6679 {
6680 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6681 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6682 /** @todo is this the right exception? */
6683 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6684 }
6685
6686 Assert(pCtx->ldtr.Attr.n.u1Present);
6687 GCPtrBase = pCtx->ldtr.u64Base;
6688 }
6689 else
6690 {
6691 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6692 {
6693 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6694 /** @todo is this the right exception? */
6695 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6696 }
6697 GCPtrBase = pCtx->gdtr.pGdt;
6698 }
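    /* Example (illustrative values): uSel=0x002b has TI=0 and RPL=3, so the GDT
       is used and the entry lives at gdtr.pGdt + (0x2b & X86_SEL_MASK) = +0x28. */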
6699
6700 /*
6701 * Read the legacy descriptor and maybe the long mode extensions if
6702 * required.
6703 */
6704 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6705 if (rcStrict == VINF_SUCCESS)
6706 {
6707 if ( !IEM_IS_LONG_MODE(pIemCpu)
6708 || pDesc->Legacy.Gen.u1DescType)
6709 pDesc->Long.au64[1] = 0;
6710 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6711 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6712 else
6713 {
6714 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6715 /** @todo is this the right exception? */
6716 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6717 }
6718 }
6719 return rcStrict;
6720}
6721
6722
6723/**
6724 * Fakes a long mode stack segment descriptor for a null SS selector (SS = 0).
6725 *
6726 * @param pDescSs Where to return the fake stack descriptor.
6727 * @param uDpl The DPL we want.
6728 */
6729static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6730{
6731 pDescSs->Long.au64[0] = 0;
6732 pDescSs->Long.au64[1] = 0;
6733 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6734 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6735 pDescSs->Long.Gen.u2Dpl = uDpl;
6736 pDescSs->Long.Gen.u1Present = 1;
6737 pDescSs->Long.Gen.u1Long = 1;
6738}
6739
6740
6741/**
6742 * Marks the selector descriptor as accessed (only non-system descriptors).
6743 *
6744 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6745 * will therefore skip the limit checks.
6746 *
6747 * @returns Strict VBox status code.
6748 * @param pIemCpu The IEM per CPU.
6749 * @param uSel The selector.
6750 */
6751static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6752{
6753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6754
6755 /*
6756 * Get the selector table base and calculate the entry address.
6757 */
6758 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6759 ? pCtx->ldtr.u64Base
6760 : pCtx->gdtr.pGdt;
6761 GCPtr += uSel & X86_SEL_MASK;
6762
6763 /*
6764 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6765 * ugly stuff to avoid this. This will make sure the access is atomic and
6766 * more or less removes any question about 8-bit vs 32-bit accesses.
6767 */
6768 VBOXSTRICTRC rcStrict;
6769 uint32_t volatile *pu32;
6770 if ((GCPtr & 3) == 0)
6771 {
6772        /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
6773 GCPtr += 2 + 2;
6774 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6775 if (rcStrict != VINF_SUCCESS)
6776 return rcStrict;
6777        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6778 }
6779 else
6780 {
6781 /* The misaligned GDT/LDT case, map the whole thing. */
6782 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6783 if (rcStrict != VINF_SUCCESS)
6784 return rcStrict;
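        /* The accessed bit is bit 40 of the 8-byte descriptor (bit 0 of byte 5).
           Pick a 4-byte aligned address near pu32 and recompute the bit index
           relative to it so ASMAtomicBitSet always sees an aligned 32-bit word. */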
6785 switch ((uintptr_t)pu32 & 3)
6786 {
6787 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6788 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6789 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6790 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6791 }
6792 }
6793
6794 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6795}
6796
6797/** @} */
6798
6799
6800/*
6801 * Include the C/C++ implementation of instruction.
6802 */
6803#include "IEMAllCImpl.cpp.h"
6804
6805
6806
6807/** @name "Microcode" macros.
6808 *
6809 * The idea is that we should be able to use the same code to interpret
6810 * instructions as well as recompiler instructions. Thus this obfuscation.
6811 *
6812 * @{
6813 */
6814#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6815#define IEM_MC_END() }
6816#define IEM_MC_PAUSE() do {} while (0)
6817#define IEM_MC_CONTINUE() do {} while (0)
6818
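/*
 * A minimal usage sketch (hypothetical, not an instruction body lifted from the
 * decoder): in IEMAllInstructions.cpp.h something like a 16-bit "push ax" expands
 * roughly into the following sequence of these macros, which reads as plain C here
 * but could be given different expansions by a future recompiler:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
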
6819/** Internal macro. */
6820#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6821 do \
6822 { \
6823 VBOXSTRICTRC rcStrict2 = a_Expr; \
6824 if (rcStrict2 != VINF_SUCCESS) \
6825 return rcStrict2; \
6826 } while (0)
6827
6828#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6829#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6830#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6831#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6832#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6833#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6834#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6835
6836#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6837#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6838 do { \
6839 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6840 return iemRaiseDeviceNotAvailable(pIemCpu); \
6841 } while (0)
6842#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6843 do { \
6844 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6845 return iemRaiseMathFault(pIemCpu); \
6846 } while (0)
6847#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
6848 do { \
6849 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
6850 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
6851 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
6852 return iemRaiseUndefinedOpcode(pIemCpu); \
6853 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
6854 return iemRaiseDeviceNotAvailable(pIemCpu); \
6855 } while (0)
6856#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
6857 do { \
6858 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
6859 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
6860 return iemRaiseUndefinedOpcode(pIemCpu); \
6861 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
6862 return iemRaiseDeviceNotAvailable(pIemCpu); \
6863 } while (0)
6864#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6865 do { \
6866 if (pIemCpu->uCpl != 0) \
6867 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6868 } while (0)
6869
6870
6871#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6872#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6873#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6874#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6875#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6876#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6877#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6878 uint32_t a_Name; \
6879 uint32_t *a_pName = &a_Name
6880#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6881 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6882
6883#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6884#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6885
6886#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6887#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6888#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6889#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6890#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6891#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6892#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6893#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6894#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6895#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6896#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6897#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6898#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6899#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6900#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6901#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6902#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6903#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6904#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6905#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6906#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6907#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6908#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6909#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6910#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6911#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6912#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6913#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6914#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6915/** @note Not for IOPL or IF testing or modification. */
6916#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6917#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6918#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6919#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6920
6921#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6922#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6923#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6924#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6925#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6926#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6927#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6928#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6929#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6930#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6931#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6932 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6933
6934#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6935#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6936/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6937 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6938#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6939#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6940/** @note Not for IOPL or IF testing or modification. */
6941#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6942
6943#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6944#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6945#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6946 do { \
6947 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6948 *pu32Reg += (a_u32Value); \
6949        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
6950 } while (0)
6951#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6952
6953#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6954#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6955#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6956 do { \
6957 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6958 *pu32Reg -= (a_u32Value); \
6959        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
6960 } while (0)
6961#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6962
6963#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6964#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6965#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6966#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6967#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6968#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6969#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6970
6971#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6972#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6973#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6974#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6975
6976#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6977#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6978#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6979
6980#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6981#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6982
6983#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6984#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6985#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6986
6987#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6988#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6989#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6990
6991#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6992
6993#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6994
6995#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6996#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6997#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6998 do { \
6999 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7000 *pu32Reg &= (a_u32Value); \
7001        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
7002 } while (0)
7003#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
7004
7005#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
7006#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
7007#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
7008 do { \
7009 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7010 *pu32Reg |= (a_u32Value); \
7011        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
7012 } while (0)
7013#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
7014
7015
7016/** @note Not for IOPL or IF modification. */
7017#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
7018/** @note Not for IOPL or IF modification. */
7019#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
7020/** @note Not for IOPL or IF modification. */
7021#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
7022
7023#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
7024
7025
7026#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
7027 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
7028#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
7029 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
7030#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
7031 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
7032#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
7033 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
7034#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
7035 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7036#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
7037 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7038#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
7039 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7040
7041#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
7042 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
7043#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
7044 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
7045#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
7046 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
7047#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
7048 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
7049#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
7050 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
7051 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7052 } while (0)
7053#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
7054 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
7055 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7056 } while (0)
7057#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
7058 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7059#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
7060 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7061#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
7062 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
7063
7064#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
7065 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
7066#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
7067 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
7068#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
7069 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
7070
7071#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7072 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
7073#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7074 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7075#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
7076 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
7077
7078#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7079 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
7080#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7081 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7082#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
7083 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
7084
7085#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7086 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7087
7088#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7089 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7090#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7091 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7092#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7093 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7094
7095#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
7096 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
7097#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
7098 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
7099#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
7100 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
7101
7102#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7103 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7104#define IEM_MC_FETCH_MEM_U128_ALIGN(a_u128Dst, a_iSeg, a_GCPtrMem) \
7105 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128Aligned(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7106
7107
7108
7109#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7110 do { \
7111 uint8_t u8Tmp; \
7112 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7113 (a_u16Dst) = u8Tmp; \
7114 } while (0)
7115#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7116 do { \
7117 uint8_t u8Tmp; \
7118 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7119 (a_u32Dst) = u8Tmp; \
7120 } while (0)
7121#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7122 do { \
7123 uint8_t u8Tmp; \
7124 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7125 (a_u64Dst) = u8Tmp; \
7126 } while (0)
7127#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7128 do { \
7129 uint16_t u16Tmp; \
7130 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7131 (a_u32Dst) = u16Tmp; \
7132 } while (0)
7133#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7134 do { \
7135 uint16_t u16Tmp; \
7136 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7137 (a_u64Dst) = u16Tmp; \
7138 } while (0)
7139#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7140 do { \
7141 uint32_t u32Tmp; \
7142 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7143 (a_u64Dst) = u32Tmp; \
7144 } while (0)
7145
7146#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7147 do { \
7148 uint8_t u8Tmp; \
7149 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7150 (a_u16Dst) = (int8_t)u8Tmp; \
7151 } while (0)
7152#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7153 do { \
7154 uint8_t u8Tmp; \
7155 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7156 (a_u32Dst) = (int8_t)u8Tmp; \
7157 } while (0)
7158#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7159 do { \
7160 uint8_t u8Tmp; \
7161 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7162 (a_u64Dst) = (int8_t)u8Tmp; \
7163 } while (0)
7164#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7165 do { \
7166 uint16_t u16Tmp; \
7167 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7168 (a_u32Dst) = (int16_t)u16Tmp; \
7169 } while (0)
7170#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7171 do { \
7172 uint16_t u16Tmp; \
7173 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7174 (a_u64Dst) = (int16_t)u16Tmp; \
7175 } while (0)
7176#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7177 do { \
7178 uint32_t u32Tmp; \
7179 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7180 (a_u64Dst) = (int32_t)u32Tmp; \
7181 } while (0)
7182
7183#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
7184 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
7185#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
7186 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
7187#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
7188 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
7189#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
7190 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
7191
7192#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
7193 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
7194#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
7195 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
7196#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
7197 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
7198#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
7199 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
7200
7201#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
7202#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
7203#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
7204#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
7205#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
7206#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
7207#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
7208 do { \
7209 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
7210 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
7211 } while (0)
7212
7213#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
7214 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7215#define IEM_MC_STORE_MEM_U128_ALIGN(a_iSeg, a_GCPtrMem, a_u128Value) \
7216 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128Aligned(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7217
7218
7219#define IEM_MC_PUSH_U16(a_u16Value) \
7220 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
7221#define IEM_MC_PUSH_U32(a_u32Value) \
7222 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
7223#define IEM_MC_PUSH_U64(a_u64Value) \
7224 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
7225
7226#define IEM_MC_POP_U16(a_pu16Value) \
7227 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
7228#define IEM_MC_POP_U32(a_pu32Value) \
7229 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
7230#define IEM_MC_POP_U64(a_pu64Value) \
7231 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
7232
7233/** Maps guest memory for direct or bounce buffered access.
7234 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7235 * @remarks May return.
7236 */
7237#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
7238 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7239
7240/** Maps guest memory for direct or bounce buffered access.
7241 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7242 * @remarks May return.
7243 */
7244#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
7245 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7246
7247/** Commits the memory and unmaps the guest memory.
7248 * @remarks May return.
7249 */
7250#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
7251 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
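
/* Typical read-modify-write pattern (sketch only; pu8Dst, u8Src, pEFlags and
 * pfnU8 are illustrative MC locals/args, not names taken from a specific
 * instruction body):
 *
 *      IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu8Dst, u8Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
 */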
7252
7253/** Commits the memory and unmaps the guest memory unless the FPU status word
7254 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
7255 * that would cause FLD not to store.
7256 *
7257 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
7258 * store, while \#P will not.
7259 *
7260 * @remarks May in theory return - for now.
7261 */
7262#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
7263 do { \
7264 if ( !(a_u16FSW & X86_FSW_ES) \
7265 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
7266 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
7267 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
7268 } while (0)
7269
7270/** Calculate efficient address from R/M. */
7271#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
7272 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
7273
7274#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
7275#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
7276#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
7277#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
7278#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
7279#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
7280#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
7281
7282/**
7283 * Defers the rest of the instruction emulation to a C implementation routine
7284 * and returns, only taking the standard parameters.
7285 *
7286 * @param a_pfnCImpl The pointer to the C routine.
7287 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7288 */
7289#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7290
7291/**
7292 * Defers the rest of instruction emulation to a C implementation routine and
7293 * returns, taking one argument in addition to the standard ones.
7294 *
7295 * @param a_pfnCImpl The pointer to the C routine.
7296 * @param a0 The argument.
7297 */
7298#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7299
7300/**
7301 * Defers the rest of the instruction emulation to a C implementation routine
7302 * and returns, taking two arguments in addition to the standard ones.
7303 *
7304 * @param a_pfnCImpl The pointer to the C routine.
7305 * @param a0 The first extra argument.
7306 * @param a1 The second extra argument.
7307 */
7308#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7309
7310/**
7311 * Defers the rest of the instruction emulation to a C implementation routine
7312 * and returns, taking three arguments in addition to the standard ones.
7313 *
7314 * @param a_pfnCImpl The pointer to the C routine.
7315 * @param a0 The first extra argument.
7316 * @param a1 The second extra argument.
7317 * @param a2 The third extra argument.
7318 */
7319#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7320
7321/**
7322 * Defers the rest of the instruction emulation to a C implementation routine
7323 * and returns, taking five arguments in addition to the standard ones.
7324 *
7325 * @param a_pfnCImpl The pointer to the C routine.
7326 * @param a0 The first extra argument.
7327 * @param a1 The second extra argument.
7328 * @param a2 The third extra argument.
7329 * @param a3 The fourth extra argument.
7330 * @param a4 The fifth extra argument.
7331 */
7332#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
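
/* Sketch (the routine name and arguments are hypothetical, not ones defined in
 * this file):
 *
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeFarBranch, uSel, offSeg);
 *
 * Note that the 'return' baked into these macros ends the IEM_MC block, so no
 * statement following the call in the same block is ever executed.
 */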
7333
7334/**
7335 * Defers the entire instruction emulation to a C implementation routine and
7336 * returns, only taking the standard parameters.
7337 *
7338 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7339 *
7340 * @param a_pfnCImpl The pointer to the C routine.
7341 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7342 */
7343#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7344
7345/**
7346 * Defers the entire instruction emulation to a C implementation routine and
7347 * returns, taking one argument in addition to the standard ones.
7348 *
7349 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7350 *
7351 * @param a_pfnCImpl The pointer to the C routine.
7352 * @param a0 The argument.
7353 */
7354#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7355
7356/**
7357 * Defers the entire instruction emulation to a C implementation routine and
7358 * returns, taking two arguments in addition to the standard ones.
7359 *
7360 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7361 *
7362 * @param a_pfnCImpl The pointer to the C routine.
7363 * @param a0 The first extra argument.
7364 * @param a1 The second extra argument.
7365 */
7366#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7367
7368/**
7369 * Defers the entire instruction emulation to a C implementation routine and
7370 * returns, taking three arguments in addition to the standard ones.
7371 *
7372 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7373 *
7374 * @param a_pfnCImpl The pointer to the C routine.
7375 * @param a0 The first extra argument.
7376 * @param a1 The second extra argument.
7377 * @param a2 The third extra argument.
7378 */
7379#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
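
/* Sketch (hypothetical routine name): a decoder function using these forms can
 * consist of little more than
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomePrivilegedOp);
 */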
7380
7381/**
7382 * Calls a FPU assembly implementation taking one visible argument.
7383 *
7384 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7385 * @param a0 The first extra argument.
7386 */
7387#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
7388 do { \
7389 iemFpuPrepareUsage(pIemCpu); \
7390 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
7391 } while (0)
7392
7393/**
7394 * Calls a FPU assembly implementation taking two visible arguments.
7395 *
7396 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7397 * @param a0 The first extra argument.
7398 * @param a1 The second extra argument.
7399 */
7400#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
7401 do { \
7402 iemFpuPrepareUsage(pIemCpu); \
7403 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7404 } while (0)
7405
7406/**
7407 * Calls a FPU assembly implementation taking three visible arguments.
7408 *
7409 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7410 * @param a0 The first extra argument.
7411 * @param a1 The second extra argument.
7412 * @param a2 The third extra argument.
7413 */
7414#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7415 do { \
7416 iemFpuPrepareUsage(pIemCpu); \
7417 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7418 } while (0)
7419
7420#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
7421 do { \
7422 (a_FpuData).FSW = (a_FSW); \
7423 (a_FpuData).r80Result = *(a_pr80Value); \
7424 } while (0)
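
/* Sketch of how the FPU call/result macros combine (roughly the shape of an
 * fadd ST0,STi body; FpuRes/pFpuRes, pr80Value1/2 and the worker name are
 * illustrative here):
 *
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */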
7425
7426/** Pushes FPU result onto the stack. */
7427#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
7428 iemFpuPushResult(pIemCpu, &a_FpuData)
7429/** Pushes FPU result onto the stack and sets the FPUDP. */
7430#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
7431 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
7432
7433/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
7434#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
7435 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
7436
7437/** Stores FPU result in a stack register. */
7438#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
7439 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
7440/** Stores FPU result in a stack register and pops the stack. */
7441#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
7442 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
7443/** Stores FPU result in a stack register and sets the FPUDP. */
7444#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7445 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7446/** Stores FPU result in a stack register, sets the FPUDP, and pops the
7447 * stack. */
7448#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7449 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7450
7451/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
7452#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
7453 iemFpuUpdateOpcodeAndIp(pIemCpu)
7454/** Free a stack register (for FFREE and FFREEP). */
7455#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
7456 iemFpuStackFree(pIemCpu, a_iStReg)
7457/** Increment the FPU stack pointer. */
7458#define IEM_MC_FPU_STACK_INC_TOP() \
7459 iemFpuStackIncTop(pIemCpu)
7460/** Decrement the FPU stack pointer. */
7461#define IEM_MC_FPU_STACK_DEC_TOP() \
7462 iemFpuStackDecTop(pIemCpu)
7463
7464/** Updates the FSW, FOP, FPUIP, and FPUCS. */
7465#define IEM_MC_UPDATE_FSW(a_u16FSW) \
7466 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7467/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
7468#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
7469 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7470/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
7471#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7472 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7473/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
7474#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
7475 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
7476/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
7477 * stack. */
7478#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7479 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7480/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
7481#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
7482    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
7483
7484/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
7485#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
7486 iemFpuStackUnderflow(pIemCpu, a_iStDst)
7487/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7488 * stack. */
7489#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
7490 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
7491/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7492 * FPUDS. */
7493#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7494 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7495/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7496 * FPUDS. Pops stack. */
7497#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7498 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7499/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7500 * stack twice. */
7501#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
7502 iemFpuStackUnderflowThenPopPop(pIemCpu)
7503/** Raises a FPU stack underflow exception for an instruction pushing a result
7504 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
7505#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
7506 iemFpuStackPushUnderflow(pIemCpu)
7507/** Raises a FPU stack underflow exception for an instruction pushing a result
7508 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
7509#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
7510 iemFpuStackPushUnderflowTwo(pIemCpu)
7511
7512/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7513 * FPUIP, FPUCS and FOP. */
7514#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
7515 iemFpuStackPushOverflow(pIemCpu)
7516/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7517 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
7518#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
7519 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
7520/** Indicates that we (might) have modified the FPU state. */
7521#define IEM_MC_USED_FPU() \
7522 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
7523
7524/**
7525 * Calls a MMX assembly implementation taking two visible arguments.
7526 *
7527 * @param a_pfnAImpl Pointer to the assembly MMX routine.
7528 * @param a0 The first extra argument.
7529 * @param a1 The second extra argument.
7530 */
7531#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
7532 do { \
7533 iemFpuPrepareUsage(pIemCpu); \
7534 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7535 } while (0)
7536
7537
7538/**
7539 * Calls a SSE assembly implementation taking two visible arguments.
7540 *
7541 * @param a_pfnAImpl Pointer to the assembly SSE routine.
7542 * @param a0 The first extra argument.
7543 * @param a1 The second extra argument.
7544 */
7545#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
7546 do { \
7547 iemFpuPrepareUsageSse(pIemCpu); \
7548 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7549 } while (0)
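
/* Sketch (worker name illustrative; pDst and pSrc would come from
 * IEM_MC_REF_XREG_U128 and IEM_MC_REF_XREG_U128_CONST):
 *
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 */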
7550
7551
7552/** @note Not for IOPL or IF testing. */
7553#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
7554/** @note Not for IOPL or IF testing. */
7555#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
7556/** @note Not for IOPL or IF testing. */
7557#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
7558/** @note Not for IOPL or IF testing. */
7559#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
7560/** @note Not for IOPL or IF testing. */
7561#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
7562 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7563 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7564/** @note Not for IOPL or IF testing. */
7565#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7566 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7567 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7568/** @note Not for IOPL or IF testing. */
7569#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7570 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7571 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7572 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7573/** @note Not for IOPL or IF testing. */
7574#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7575 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7576 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7577 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7578#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7579#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7580#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7581/** @note Not for IOPL or IF testing. */
7582#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7583 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7584 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7585/** @note Not for IOPL or IF testing. */
7586#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7587 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7588 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7589/** @note Not for IOPL or IF testing. */
7590#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7591 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7592 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7593/** @note Not for IOPL or IF testing. */
7594#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7595 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7596 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7597/** @note Not for IOPL or IF testing. */
7598#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7599 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7600 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7601/** @note Not for IOPL or IF testing. */
7602#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7603 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7604 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7605#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7606#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7607#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7608 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7609#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7610 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7611#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7612 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7613#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7614 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7615#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7616 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7617#define IEM_MC_IF_FCW_IM() \
7618 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7619
7620#define IEM_MC_ELSE() } else {
7621#define IEM_MC_ENDIF() } do {} while (0)
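
/* The IF/ELSE/ENDIF forms carry their own braces, so a conditional body (roughly
 * what a Jcc decoder expands to; i8Imm is an illustrative local) reads:
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 */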
7622
7623/** @} */
7624
7625
7626/** @name Opcode Debug Helpers.
7627 * @{
7628 */
7629#ifdef DEBUG
7630# define IEMOP_MNEMONIC(a_szMnemonic) \
7631 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7632 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7633# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7634 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7635 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7636#else
7637# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7638# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7639#endif
7640
7641/** @} */
7642
7643
7644/** @name Opcode Helpers.
7645 * @{
7646 */
7647
7648/** The instruction raises an \#UD in real and V8086 mode. */
7649#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7650 do \
7651 { \
7652 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7653 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7654 } while (0)
7655
7656/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
7657 * lock prefixed.
7658 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7659#define IEMOP_HLP_NO_LOCK_PREFIX() \
7660 do \
7661 { \
7662 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7663 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7664 } while (0)
7665
7666/** The instruction is not available in 64-bit mode, throw \#UD if we're in
7667 * 64-bit mode. */
7668#define IEMOP_HLP_NO_64BIT() \
7669 do \
7670 { \
7671 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7672 return IEMOP_RAISE_INVALID_OPCODE(); \
7673 } while (0)
7674
7675/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
7676 * 64-bit mode. */
7677#define IEMOP_HLP_ONLY_64BIT() \
7678 do \
7679 { \
7680 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
7681 return IEMOP_RAISE_INVALID_OPCODE(); \
7682 } while (0)
7683
7684/** The instruction defaults to 64-bit operand size if 64-bit mode. */
7685#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7686 do \
7687 { \
7688 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7689 iemRecalEffOpSize64Default(pIemCpu); \
7690 } while (0)
7691
7692/** The instruction has 64-bit operand size if 64-bit mode. */
7693#define IEMOP_HLP_64BIT_OP_SIZE() \
7694 do \
7695 { \
7696 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7697 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7698 } while (0)
7699
7700/** Only a REX prefix immediately preceding the first opcode byte takes
7701 * effect. This macro helps ensure that, and it logs bad guest code. */
7702#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
7703 do \
7704 { \
7705 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
7706 { \
7707 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
7708 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
7709 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
7710 pIemCpu->uRexB = 0; \
7711 pIemCpu->uRexIndex = 0; \
7712 pIemCpu->uRexReg = 0; \
7713 iemRecalEffOpSize(pIemCpu); \
7714 } \
7715 } while (0)
7716
7717/**
7718 * Done decoding.
7719 */
7720#define IEMOP_HLP_DONE_DECODING() \
7721 do \
7722 { \
7723 /*nothing for now, maybe later... */ \
7724 } while (0)
7725
7726/**
7727 * Done decoding, raise \#UD exception if lock prefix present.
7728 */
7729#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7730 do \
7731 { \
7732 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7733 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7734 } while (0)
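
/* Sketch of where these helpers sit in a decoder function (mnemonic and names are
 * purely illustrative):
 *
 *      IEMOP_MNEMONIC("mov Ev,Gv");
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      ... then build the IEM_MC_* block ...
 */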
7735
7736
7737/**
7738 * Calculates the effective address of a ModR/M memory operand.
7739 *
7740 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7741 *
7742 * @return Strict VBox status code.
7743 * @param pIemCpu The IEM per CPU data.
7744 * @param bRm The ModRM byte.
7745 * @param cbImm The size of any immediate following the
7746 * effective address opcode bytes. Important for
7747 * RIP relative addressing.
7748 * @param pGCPtrEff Where to return the effective address.
7749 */
7750static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
7751{
7752 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7753 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7754#define SET_SS_DEF() \
7755 do \
7756 { \
7757 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7758 pIemCpu->iEffSeg = X86_SREG_SS; \
7759 } while (0)
7760
7761 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
7762 {
7763/** @todo Check the effective address size crap! */
7764 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
7765 {
7766 uint16_t u16EffAddr;
7767
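            /* Example: bRm=0x52 decodes as mod=1, rm=2, i.e. [bp+si+disp8]; the
               bp based forms default to SS, hence the SET_SS_DEF() calls below. */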
7768 /* Handle the disp16 form with no registers first. */
7769 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7770 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7771 else
7772 {
7773                /* Get the displacement. */
7774 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7775 {
7776 case 0: u16EffAddr = 0; break;
7777 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7778 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7779 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7780 }
7781
7782 /* Add the base and index registers to the disp. */
7783 switch (bRm & X86_MODRM_RM_MASK)
7784 {
7785 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7786 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7787 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7788 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7789 case 4: u16EffAddr += pCtx->si; break;
7790 case 5: u16EffAddr += pCtx->di; break;
7791 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7792 case 7: u16EffAddr += pCtx->bx; break;
7793 }
7794 }
7795
7796 *pGCPtrEff = u16EffAddr;
7797 }
7798 else
7799 {
7800 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
7801 uint32_t u32EffAddr;
7802
7803 /* Handle the disp32 form with no registers first. */
7804 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7805 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7806 else
7807 {
7808 /* Get the register (or SIB) value. */
7809 switch ((bRm & X86_MODRM_RM_MASK))
7810 {
7811 case 0: u32EffAddr = pCtx->eax; break;
7812 case 1: u32EffAddr = pCtx->ecx; break;
7813 case 2: u32EffAddr = pCtx->edx; break;
7814 case 3: u32EffAddr = pCtx->ebx; break;
7815 case 4: /* SIB */
7816 {
7817 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7818
7819 /* Get the index and scale it. */
7820 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7821 {
7822 case 0: u32EffAddr = pCtx->eax; break;
7823 case 1: u32EffAddr = pCtx->ecx; break;
7824 case 2: u32EffAddr = pCtx->edx; break;
7825 case 3: u32EffAddr = pCtx->ebx; break;
7826 case 4: u32EffAddr = 0; /*none */ break;
7827 case 5: u32EffAddr = pCtx->ebp; break;
7828 case 6: u32EffAddr = pCtx->esi; break;
7829 case 7: u32EffAddr = pCtx->edi; break;
7830 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7831 }
7832 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7833
7834 /* add base */
7835 switch (bSib & X86_SIB_BASE_MASK)
7836 {
7837 case 0: u32EffAddr += pCtx->eax; break;
7838 case 1: u32EffAddr += pCtx->ecx; break;
7839 case 2: u32EffAddr += pCtx->edx; break;
7840 case 3: u32EffAddr += pCtx->ebx; break;
7841 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7842 case 5:
7843 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7844 {
7845 u32EffAddr += pCtx->ebp;
7846 SET_SS_DEF();
7847 }
7848 else
7849 {
7850 uint32_t u32Disp;
7851 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7852 u32EffAddr += u32Disp;
7853 }
7854 break;
7855 case 6: u32EffAddr += pCtx->esi; break;
7856 case 7: u32EffAddr += pCtx->edi; break;
7857 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7858 }
7859 break;
7860 }
7861 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7862 case 6: u32EffAddr = pCtx->esi; break;
7863 case 7: u32EffAddr = pCtx->edi; break;
7864 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7865 }
7866
7867 /* Get and add the displacement. */
7868 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7869 {
7870 case 0:
7871 break;
7872 case 1:
7873 {
7874 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7875 u32EffAddr += i8Disp;
7876 break;
7877 }
7878 case 2:
7879 {
7880 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7881 u32EffAddr += u32Disp;
7882 break;
7883 }
7884 default:
7885 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7886 }
7887
7888 }
7889 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7890 *pGCPtrEff = u32EffAddr;
7891 else
7892 {
7893 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7894 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7895 }
7896 }
7897 }
7898 else
7899 {
7900 uint64_t u64EffAddr;
7901
7902 /* Handle the rip+disp32 form with no registers first. */
7903 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7904 {
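 /* The displacement is relative to the next instruction, hence the current
    opcode offset plus the size of any trailing immediate is added to RIP. */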
7905 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7906 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
7907 }
7908 else
7909 {
7910 /* Get the register (or SIB) value. */
7911 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7912 {
7913 case 0: u64EffAddr = pCtx->rax; break;
7914 case 1: u64EffAddr = pCtx->rcx; break;
7915 case 2: u64EffAddr = pCtx->rdx; break;
7916 case 3: u64EffAddr = pCtx->rbx; break;
7917 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7918 case 6: u64EffAddr = pCtx->rsi; break;
7919 case 7: u64EffAddr = pCtx->rdi; break;
7920 case 8: u64EffAddr = pCtx->r8; break;
7921 case 9: u64EffAddr = pCtx->r9; break;
7922 case 10: u64EffAddr = pCtx->r10; break;
7923 case 11: u64EffAddr = pCtx->r11; break;
7924 case 13: u64EffAddr = pCtx->r13; break;
7925 case 14: u64EffAddr = pCtx->r14; break;
7926 case 15: u64EffAddr = pCtx->r15; break;
7927 /* SIB */
7928 case 4:
7929 case 12:
7930 {
7931 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7932
7933 /* Get the index and scale it. */
7934 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7935 {
7936 case 0: u64EffAddr = pCtx->rax; break;
7937 case 1: u64EffAddr = pCtx->rcx; break;
7938 case 2: u64EffAddr = pCtx->rdx; break;
7939 case 3: u64EffAddr = pCtx->rbx; break;
7940 case 4: u64EffAddr = 0; /*none */ break;
7941 case 5: u64EffAddr = pCtx->rbp; break;
7942 case 6: u64EffAddr = pCtx->rsi; break;
7943 case 7: u64EffAddr = pCtx->rdi; break;
7944 case 8: u64EffAddr = pCtx->r8; break;
7945 case 9: u64EffAddr = pCtx->r9; break;
7946 case 10: u64EffAddr = pCtx->r10; break;
7947 case 11: u64EffAddr = pCtx->r11; break;
7948 case 12: u64EffAddr = pCtx->r12; break;
7949 case 13: u64EffAddr = pCtx->r13; break;
7950 case 14: u64EffAddr = pCtx->r14; break;
7951 case 15: u64EffAddr = pCtx->r15; break;
7952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7953 }
7954 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7955
7956 /* add base */
7957 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7958 {
7959 case 0: u64EffAddr += pCtx->rax; break;
7960 case 1: u64EffAddr += pCtx->rcx; break;
7961 case 2: u64EffAddr += pCtx->rdx; break;
7962 case 3: u64EffAddr += pCtx->rbx; break;
7963 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7964 case 6: u64EffAddr += pCtx->rsi; break;
7965 case 7: u64EffAddr += pCtx->rdi; break;
7966 case 8: u64EffAddr += pCtx->r8; break;
7967 case 9: u64EffAddr += pCtx->r9; break;
7968 case 10: u64EffAddr += pCtx->r10; break;
7969 case 11: u64EffAddr += pCtx->r11; break;
7970 case 12: u64EffAddr += pCtx->r12; break;
7971 case 14: u64EffAddr += pCtx->r14; break;
7972 case 15: u64EffAddr += pCtx->r15; break;
7973 /* complicated encodings */
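 /* Base 5/13 with mod=0 has no base register, just a disp32 (same idea as
    the 32-bit SIB case above); otherwise the base is rBP/r13. */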
7974 case 5:
7975 case 13:
7976 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7977 {
7978 if (!pIemCpu->uRexB)
7979 {
7980 u64EffAddr += pCtx->rbp;
7981 SET_SS_DEF();
7982 }
7983 else
7984 u64EffAddr += pCtx->r13;
7985 }
7986 else
7987 {
7988 uint32_t u32Disp;
7989 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7990 u64EffAddr += (int32_t)u32Disp;
7991 }
7992 break;
7993 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7994 }
7995 break;
7996 }
7997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7998 }
7999
8000 /* Get and add the displacement. */
8001 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8002 {
8003 case 0:
8004 break;
8005 case 1:
8006 {
8007 int8_t i8Disp;
8008 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8009 u64EffAddr += i8Disp;
8010 break;
8011 }
8012 case 2:
8013 {
8014 uint32_t u32Disp;
8015 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8016 u64EffAddr += (int32_t)u32Disp;
8017 break;
8018 }
8019 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8020 }
8021
8022 }
8023
8024 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
8025 *pGCPtrEff = u64EffAddr;
8026 else
8027 {
8028 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8029 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8030 }
8031 }
8032
8033 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8034 return VINF_SUCCESS;
8035}
8036
8037/** @} */
8038
8039
8040
8041/*
8042 * Include the instructions
8043 */
8044#include "IEMAllInstructions.cpp.h"
8045
8046
8047
8048
8049#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8050
8051/**
8052 * Sets up execution verification mode.
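 *
 * @remarks Rough flow (summary, not normative): the guest context is copied into a
 *          static debug context so IEM executes on the copy; REM later replays the
 *          instruction on the original and iemExecVerificationModeCheck() diffs the
 *          two together with the recorded I/O and RAM events.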
8053 */
8054static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
8055{
8056 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8057 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
8058
8059 /*
8060 * Always note down the address of the current instruction.
8061 */
8062 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
8063 pIemCpu->uOldRip = pOrgCtx->rip;
8064
8065 /*
8066 * Enable verification and/or logging.
8067 */
8068 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
8069 if ( pIemCpu->fNoRem
8070 && ( 0
8071#if 0 /* auto enable on first paged protected mode interrupt */
8072 || ( pOrgCtx->eflags.Bits.u1IF
8073 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
8074 && TRPMHasTrap(pVCpu)
8075 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
8076#endif
8077#if 0
8078 || ( pOrgCtx->cs.Sel == 0x10
8079 && ( pOrgCtx->rip == 0x90119e3e
8080 || pOrgCtx->rip == 0x901d9810))
8081#endif
8082#if 0 /* Auto enable DSL - FPU stuff. */
8083 || ( pOrgCtx->cs == 0x10
8084 && (// pOrgCtx->rip == 0xc02ec07f
8085 //|| pOrgCtx->rip == 0xc02ec082
8086 //|| pOrgCtx->rip == 0xc02ec0c9
8087 0
8088 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
8089#endif
8090#if 0 /* Auto enable DSL - fstp st0 stuff. */
8091 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
8092#endif
8093#if 0
8094 || pOrgCtx->rip == 0x9022bb3a
8095#endif
8096#if 0
8097 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
8098#endif
8099#if 0
8100 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
8101 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
8102#endif
8103#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
8104 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
8105 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
8106 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
8107#endif
8108#if 0 /* NT4SP1 - xadd early boot. */
8109 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
8110#endif
8111#if 0 /* NT4SP1 - wrmsr (intel MSR). */
8112 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
8113#endif
8114#if 0 /* NT4SP1 - cmpxchg (AMD). */
8115 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
8116#endif
8117#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
8118 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
8119#endif
8120#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
8121 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
8122
8123#endif
8124#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
8125 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
8126
8127#endif
8128#if 0 /* NT4SP1 - frstor [ecx] */
8129 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
8130#endif
8131#if 0 /* xxxxxx - All long mode code. */
8132 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
8133#endif
8134#if 0 /* rep movsq linux 3.7 64-bit boot. */
8135 || (pOrgCtx->rip == 0x0000000000100241)
8136#endif
8137#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
8138 || (pOrgCtx->rip == 0x000000000215e240)
8139#endif
8140 )
8141 )
8142 {
8143 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
8144 RTLogFlags(NULL, "enabled");
8145 pIemCpu->fNoRem = false;
8146 }
8147
8148 /*
8149 * Switch state.
8150 */
8151 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8152 {
8153 static CPUMCTX s_DebugCtx; /* Ugly! */
8154
8155 s_DebugCtx = *pOrgCtx;
8156 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
8157 }
8158
8159 /*
8160 * See if there is an interrupt pending in TRPM and inject it if we can.
8161 */
8162 pIemCpu->uInjectCpl = UINT8_MAX;
8163 if ( pOrgCtx->eflags.Bits.u1IF
8164 && TRPMHasTrap(pVCpu)
8165 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
8166 {
8167 uint8_t u8TrapNo;
8168 TRPMEVENT enmType;
8169 RTGCUINT uErrCode;
8170 RTGCPTR uCr2;
8171 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
8172 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
8173 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8174 TRPMResetTrap(pVCpu);
8175 pIemCpu->uInjectCpl = pIemCpu->uCpl;
8176 }
8177
8178 /*
8179 * Reset the counters.
8180 */
8181 pIemCpu->cIOReads = 0;
8182 pIemCpu->cIOWrites = 0;
8183 pIemCpu->fIgnoreRaxRdx = false;
8184 pIemCpu->fOverlappingMovs = false;
8185 pIemCpu->fUndefinedEFlags = 0;
8186
8187 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8188 {
8189 /*
8190 * Free all verification records.
8191 */
8192 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
8193 pIemCpu->pIemEvtRecHead = NULL;
8194 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
8195 do
8196 {
8197 while (pEvtRec)
8198 {
8199 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
8200 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
8201 pIemCpu->pFreeEvtRec = pEvtRec;
8202 pEvtRec = pNext;
8203 }
8204 pEvtRec = pIemCpu->pOtherEvtRecHead;
8205 pIemCpu->pOtherEvtRecHead = NULL;
8206 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
8207 } while (pEvtRec);
8208 }
8209}
8210
8211
8212/**
8213 * Allocate an event record.
8214 * @returns Pointer to a record.
8215 */
8216static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
8217{
8218 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8219 return NULL;
8220
8221 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
8222 if (pEvtRec)
8223 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
8224 else
8225 {
8226 if (!pIemCpu->ppIemEvtRecNext)
8227 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
8228
8229 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
8230 if (!pEvtRec)
8231 return NULL;
8232 }
8233 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
8234 pEvtRec->pNext = NULL;
8235 return pEvtRec;
8236}
8237
8238
8239/**
8240 * IOMMMIORead notification.
8241 */
8242VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
8243{
8244 PVMCPU pVCpu = VMMGetCpu(pVM);
8245 if (!pVCpu)
8246 return;
8247 PIEMCPU pIemCpu = &pVCpu->iem.s;
8248 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8249 if (!pEvtRec)
8250 return;
8251 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8252 pEvtRec->u.RamRead.GCPhys = GCPhys;
8253 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
8254 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8255 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8256}
8257
8258
8259/**
8260 * IOMMMIOWrite notification.
8261 */
8262VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
8263{
8264 PVMCPU pVCpu = VMMGetCpu(pVM);
8265 if (!pVCpu)
8266 return;
8267 PIEMCPU pIemCpu = &pVCpu->iem.s;
8268 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8269 if (!pEvtRec)
8270 return;
8271 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8272 pEvtRec->u.RamWrite.GCPhys = GCPhys;
8273 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
8274 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
8275 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
8276 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
8277 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
8278 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8279 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8280}
8281
8282
8283/**
8284 * IOMIOPortRead notification.
8285 */
8286VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
8287{
8288 PVMCPU pVCpu = VMMGetCpu(pVM);
8289 if (!pVCpu)
8290 return;
8291 PIEMCPU pIemCpu = &pVCpu->iem.s;
8292 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8293 if (!pEvtRec)
8294 return;
8295 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8296 pEvtRec->u.IOPortRead.Port = Port;
8297 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8298 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8299 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8300}
8301
8302/**
8303 * IOMIOPortWrite notification.
8304 */
8305VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8306{
8307 PVMCPU pVCpu = VMMGetCpu(pVM);
8308 if (!pVCpu)
8309 return;
8310 PIEMCPU pIemCpu = &pVCpu->iem.s;
8311 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8312 if (!pEvtRec)
8313 return;
8314 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8315 pEvtRec->u.IOPortWrite.Port = Port;
8316 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8317 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8318 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8319 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8320}
8321
8322
8323VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
8324{
8325 AssertFailed();
8326}
8327
8328
8329VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
8330{
8331 AssertFailed();
8332}
8333
8334
8335/**
8336 * Fakes and records an I/O port read.
8337 *
8338 * @returns VINF_SUCCESS.
8339 * @param pIemCpu The IEM per CPU data.
8340 * @param Port The I/O port.
8341 * @param pu32Value Where to store the fake value.
8342 * @param cbValue The size of the access.
8343 */
8344static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8345{
8346 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8347 if (pEvtRec)
8348 {
8349 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8350 pEvtRec->u.IOPortRead.Port = Port;
8351 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8352 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8353 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8354 }
8355 pIemCpu->cIOReads++;
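 /* 0xcccccccc is an arbitrary dummy value for the faked read; the register compare
    later makes allowances for it (see the cIOReads and fIgnoreRaxRdx checks). */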
8356 *pu32Value = 0xcccccccc;
8357 return VINF_SUCCESS;
8358}
8359
8360
8361/**
8362 * Fakes and records an I/O port write.
8363 *
8364 * @returns VINF_SUCCESS.
8365 * @param pIemCpu The IEM per CPU data.
8366 * @param Port The I/O port.
8367 * @param u32Value The value being written.
8368 * @param cbValue The size of the access.
8369 */
8370static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8371{
8372 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8373 if (pEvtRec)
8374 {
8375 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8376 pEvtRec->u.IOPortWrite.Port = Port;
8377 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8378 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8379 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8380 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8381 }
8382 pIemCpu->cIOWrites++;
8383 return VINF_SUCCESS;
8384}
8385
8386
8387/**
8388 * Used to add extra details about a stub case.
8389 * @param pIemCpu The IEM per CPU state.
8390 */
8391static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
8392{
8393 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8394 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8395 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8396 char szRegs[4096];
8397 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
8398 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
8399 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
8400 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
8401 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
8402 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
8403 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
8404 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
8405 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
8406 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
8407 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
8408 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
8409 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
8410 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
8411 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
8412 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
8413 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
8414 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
8415 " efer=%016VR{efer}\n"
8416 " pat=%016VR{pat}\n"
8417 " sf_mask=%016VR{sf_mask}\n"
8418 "krnl_gs_base=%016VR{krnl_gs_base}\n"
8419 " lstar=%016VR{lstar}\n"
8420 " star=%016VR{star} cstar=%016VR{cstar}\n"
8421 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
8422 );
8423
8424 char szInstr1[256];
8425 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
8426 DBGF_DISAS_FLAGS_DEFAULT_MODE,
8427 szInstr1, sizeof(szInstr1), NULL);
8428 char szInstr2[256];
8429 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
8430 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8431 szInstr2, sizeof(szInstr2), NULL);
8432
8433 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
8434}
8435
8436
8437/**
8438 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
8439 * dump to the assertion info.
8440 *
8441 * @param pEvtRec The record to dump.
8442 */
8443static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
8444{
8445 switch (pEvtRec->enmEvent)
8446 {
8447 case IEMVERIFYEVENT_IOPORT_READ:
8448 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
8449 pEvtRec->u.IOPortRead.Port,
8450 pEvtRec->u.IOPortRead.cbValue);
8451 break;
8452 case IEMVERIFYEVENT_IOPORT_WRITE:
8453 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
8454 pEvtRec->u.IOPortWrite.Port,
8455 pEvtRec->u.IOPortWrite.cbValue,
8456 pEvtRec->u.IOPortWrite.u32Value);
8457 break;
8458 case IEMVERIFYEVENT_RAM_READ:
8459 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
8460 pEvtRec->u.RamRead.GCPhys,
8461 pEvtRec->u.RamRead.cb);
8462 break;
8463 case IEMVERIFYEVENT_RAM_WRITE:
8464 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
8465 pEvtRec->u.RamWrite.GCPhys,
8466 pEvtRec->u.RamWrite.cb,
8467 (int)pEvtRec->u.RamWrite.cb,
8468 pEvtRec->u.RamWrite.ab);
8469 break;
8470 default:
8471 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
8472 break;
8473 }
8474}
8475
8476
8477/**
8478 * Raises an assertion on the specified records, showing the given message with
8479 * dumps of both records attached.
8480 *
8481 * @param pIemCpu The IEM per CPU data.
8482 * @param pEvtRec1 The first record.
8483 * @param pEvtRec2 The second record.
8484 * @param pszMsg The message explaining why we're asserting.
8485 */
8486static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
8487{
8488 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8489 iemVerifyAssertAddRecordDump(pEvtRec1);
8490 iemVerifyAssertAddRecordDump(pEvtRec2);
8491 iemVerifyAssertMsg2(pIemCpu);
8492 RTAssertPanic();
8493}
8494
8495
8496/**
8497 * Raises an assertion on the specified record, showing the given message with
8498 * a record dump attached.
8499 *
8500 * @param pIemCpu The IEM per CPU data.
8501 * @param pEvtRec The record to dump.
8502 * @param pszMsg The message explaining why we're asserting.
8503 */
8504static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
8505{
8506 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8507 iemVerifyAssertAddRecordDump(pEvtRec);
8508 iemVerifyAssertMsg2(pIemCpu);
8509 RTAssertPanic();
8510}
8511
8512
8513/**
8514 * Verifies a write record.
8515 *
8516 * @param pIemCpu The IEM per CPU data.
8517 * @param pEvtRec The write record.
8518 */
8519static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
8520{
8521 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
8522 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
8523 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
8524 if ( RT_FAILURE(rc)
8525 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
8526 {
8527 /* fend off ins */
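 /* That is, a string input (INS) would have stored the 0xcc dummy bytes handed
    back by the faked port read, so such writes are expected to differ. */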
8528 if ( !pIemCpu->cIOReads
8529 || pEvtRec->u.RamWrite.ab[0] != 0xcc
8530 || ( pEvtRec->u.RamWrite.cb != 1
8531 && pEvtRec->u.RamWrite.cb != 2
8532 && pEvtRec->u.RamWrite.cb != 4) )
8533 {
8534 /* fend off ROMs */
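 /* The ranges below presumably cover the VGA BIOS (0xc0000..0xc7fff), the
    upper BIOS area (0xe0000..0xfffff) and the high BIOS mapping below 4GB. */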
8535 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
8536 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
8537 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
8538 {
8539 /* fend off fxsave */
8540 if (pEvtRec->u.RamWrite.cb != 512)
8541 {
8542 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8543 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
8544 RTAssertMsg2Add("REM: %.*Rhxs\n"
8545 "IEM: %.*Rhxs\n",
8546 pEvtRec->u.RamWrite.cb, abBuf,
8547 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
8548 iemVerifyAssertAddRecordDump(pEvtRec);
8549 iemVerifyAssertMsg2(pIemCpu);
8550 RTAssertPanic();
8551 }
8552 }
8553 }
8554 }
8555
8556}
8557
8558/**
8559 * Performs the post-execution verification checks.
8560 */
8561static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
8562{
8563 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8564 return;
8565
8566 /*
8567 * Switch back the state.
8568 */
8569 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
8570 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
8571 Assert(pOrgCtx != pDebugCtx);
8572 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8573
8574 /*
8575 * Execute the instruction in REM.
8576 */
8577 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8578 EMRemLock(pVM);
8579 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
8580 AssertRC(rc);
8581 EMRemUnlock(pVM);
8582
8583 /*
8584 * Compare the register states.
8585 */
8586 unsigned cDiffs = 0;
8587 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
8588 {
8589 //Log(("REM and IEM ends up with different registers!\n"));
8590
8591# define CHECK_FIELD(a_Field) \
8592 do \
8593 { \
8594 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8595 { \
8596 switch (sizeof(pOrgCtx->a_Field)) \
8597 { \
8598 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8599 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8600 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8601 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8602 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
8603 } \
8604 cDiffs++; \
8605 } \
8606 } while (0)
8607
8608# define CHECK_BIT_FIELD(a_Field) \
8609 do \
8610 { \
8611 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8612 { \
8613 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8614 cDiffs++; \
8615 } \
8616 } while (0)
8617
8618# define CHECK_SEL(a_Sel) \
8619 do \
8620 { \
8621 CHECK_FIELD(a_Sel.Sel); \
8622 CHECK_FIELD(a_Sel.Attr.u); \
8623 CHECK_FIELD(a_Sel.u64Base); \
8624 CHECK_FIELD(a_Sel.u32Limit); \
8625 CHECK_FIELD(a_Sel.fFlags); \
8626 } while (0)
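/* Illustrative usage: CHECK_SEL(cs) expands into field-by-field compares of cs.Sel,
   cs.Attr.u, cs.u64Base, cs.u32Limit and cs.fFlags between the REM and IEM contexts. */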
8627
8628#if 1 /* The recompiler doesn't update these the intel way. */
8629 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8630 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8631 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8632 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8633 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8634 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8635 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8636 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8637 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8638 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8639#endif
8640 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8641 {
8642 RTAssertMsg2Weak(" the FPU state differs\n");
8643 cDiffs++;
8644 CHECK_FIELD(fpu.FCW);
8645 CHECK_FIELD(fpu.FSW);
8646 CHECK_FIELD(fpu.FTW);
8647 CHECK_FIELD(fpu.FOP);
8648 CHECK_FIELD(fpu.FPUIP);
8649 CHECK_FIELD(fpu.CS);
8650 CHECK_FIELD(fpu.Rsrvd1);
8651 CHECK_FIELD(fpu.FPUDP);
8652 CHECK_FIELD(fpu.DS);
8653 CHECK_FIELD(fpu.Rsrvd2);
8654 CHECK_FIELD(fpu.MXCSR);
8655 CHECK_FIELD(fpu.MXCSR_MASK);
8656 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8657 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8658 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8659 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8660 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8661 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8662 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8663 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8664 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8665 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8666 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8667 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8668 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8669 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8670 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8671 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8672 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8673 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8674 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8675 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8676 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8677 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8678 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8679 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8680 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8681 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8682 }
8683 CHECK_FIELD(rip);
8684 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8685 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8686 {
8687 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8688 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8689 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8690 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8691 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8692 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8693 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8694 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8695 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8696 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8697 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8698 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8699 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8700 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8701 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8702 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8703 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8704 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8705 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8706 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8707 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8708 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8709 }
8710
8711 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8712 CHECK_FIELD(rax);
8713 CHECK_FIELD(rcx);
8714 if (!pIemCpu->fIgnoreRaxRdx)
8715 CHECK_FIELD(rdx);
8716 CHECK_FIELD(rbx);
8717 CHECK_FIELD(rsp);
8718 CHECK_FIELD(rbp);
8719 CHECK_FIELD(rsi);
8720 CHECK_FIELD(rdi);
8721 CHECK_FIELD(r8);
8722 CHECK_FIELD(r9);
8723 CHECK_FIELD(r10);
8724 CHECK_FIELD(r11);
8725 CHECK_FIELD(r12);
8726 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
8727 CHECK_SEL(cs);
8728 CHECK_SEL(ss);
8729 CHECK_SEL(ds);
8730 CHECK_SEL(es);
8731 CHECK_SEL(fs);
8732 CHECK_SEL(gs);
8733 CHECK_FIELD(cr0);
8734 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
8735 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
8736 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
8737 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
8738 if (pOrgCtx->cr2 != pDebugCtx->cr2)
8739 {
8740 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
8741 { /* ignore */ }
8742 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
8743 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
8744 { /* ignore */ }
8745 else
8746 CHECK_FIELD(cr2);
8747 }
8748 CHECK_FIELD(cr3);
8749 CHECK_FIELD(cr4);
8750 CHECK_FIELD(dr[0]);
8751 CHECK_FIELD(dr[1]);
8752 CHECK_FIELD(dr[2]);
8753 CHECK_FIELD(dr[3]);
8754 CHECK_FIELD(dr[6]);
8755 if ((pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
8756 CHECK_FIELD(dr[7]);
8757 CHECK_FIELD(gdtr.cbGdt);
8758 CHECK_FIELD(gdtr.pGdt);
8759 CHECK_FIELD(idtr.cbIdt);
8760 CHECK_FIELD(idtr.pIdt);
8761 CHECK_SEL(ldtr);
8762 CHECK_SEL(tr);
8763 CHECK_FIELD(SysEnter.cs);
8764 CHECK_FIELD(SysEnter.eip);
8765 CHECK_FIELD(SysEnter.esp);
8766 CHECK_FIELD(msrEFER);
8767 CHECK_FIELD(msrSTAR);
8768 CHECK_FIELD(msrPAT);
8769 CHECK_FIELD(msrLSTAR);
8770 CHECK_FIELD(msrCSTAR);
8771 CHECK_FIELD(msrSFMASK);
8772 CHECK_FIELD(msrKERNELGSBASE);
8773
8774 if (cDiffs != 0)
8775 {
8776 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
8777 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8778 iemVerifyAssertMsg2(pIemCpu);
8779 RTAssertPanic();
8780 }
8781# undef CHECK_FIELD
8782# undef CHECK_BIT_FIELD
8783 }
8784
8785 /*
8786 * If the register state compared fine, check the verification event
8787 * records.
8788 */
8789 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8790 {
8791 /*
8792 * Compare verification event records.
8793 * - I/O port accesses should be a 1:1 match.
8794 */
8795 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8796 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8797 while (pIemRec && pOtherRec)
8798 {
8799 /* Since we might miss RAM writes and reads, ignore reads and verify
8800 that memory written by the extra IEM records matches guest memory. */
8801 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8802 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8803 && pIemRec->pNext)
8804 {
8805 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8806 iemVerifyWriteRecord(pIemCpu, pIemRec);
8807 pIemRec = pIemRec->pNext;
8808 }
8809
8810 /* Do the compare. */
8811 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8812 {
8813 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8814 break;
8815 }
8816 bool fEquals;
8817 switch (pIemRec->enmEvent)
8818 {
8819 case IEMVERIFYEVENT_IOPORT_READ:
8820 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8821 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8822 break;
8823 case IEMVERIFYEVENT_IOPORT_WRITE:
8824 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8825 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8826 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8827 break;
8828 case IEMVERIFYEVENT_RAM_READ:
8829 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8830 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8831 break;
8832 case IEMVERIFYEVENT_RAM_WRITE:
8833 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8834 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8835 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8836 break;
8837 default:
8838 fEquals = false;
8839 break;
8840 }
8841 if (!fEquals)
8842 {
8843 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8844 break;
8845 }
8846
8847 /* advance */
8848 pIemRec = pIemRec->pNext;
8849 pOtherRec = pOtherRec->pNext;
8850 }
8851
8852 /* Ignore extra writes and reads. */
8853 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8854 {
8855 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8856 iemVerifyWriteRecord(pIemCpu, pIemRec);
8857 pIemRec = pIemRec->pNext;
8858 }
8859 if (pIemRec != NULL)
8860 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8861 else if (pOtherRec != NULL)
8862 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
8863 }
8864 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8865}
8866
8867#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8868
8869/* stubs */
8870static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8871{
8872 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8873 return VERR_INTERNAL_ERROR;
8874}
8875
8876static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8877{
8878 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8879 return VERR_INTERNAL_ERROR;
8880}
8881
8882#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8883
8884
8885/**
8886 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8887 * IEMExecOneWithPrefetchedByPC.
8888 *
8889 * @return Strict VBox status code.
8890 * @param pVCpu The current virtual CPU.
8891 * @param pIemCpu The IEM per CPU data.
8892 * @param fExecuteInhibit If set, execute the instruction following CLI,
8893 * POP SS and MOV SS,GR.
8894 */
8895DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
8896{
8897 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8898 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8899 if (rcStrict == VINF_SUCCESS)
8900 pIemCpu->cInstructions++;
8901//#ifdef DEBUG
8902// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8903//#endif
8904
8905 /* Execute the next instruction as well if a cli, pop ss or
8906 mov ss, Gr has just completed successfully. */
8907 if ( fExecuteInhibit
8908 && rcStrict == VINF_SUCCESS
8909 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8910 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8911 {
8912 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
8913 if (rcStrict == VINF_SUCCESS)
8914 {
8915 IEM_OPCODE_GET_NEXT_U8(&b);
8916 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8917 if (rcStrict == VINF_SUCCESS)
8918 pIemCpu->cInstructions++;
8919 }
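 /* The bogus PC set below presumably acts as a sentinel that can never match
    the guest RIP, so the interrupt inhibition shadow is effectively dropped. */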
8920 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8921 }
8922
8923 /*
8924 * Return value fiddling and statistics.
8925 */
8926 if (rcStrict != VINF_SUCCESS)
8927 {
8928 if (RT_SUCCESS(rcStrict))
8929 {
8930 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8931 int32_t const rcPassUp = pIemCpu->rcPassUp;
8932 if (rcPassUp == VINF_SUCCESS)
8933 pIemCpu->cRetInfStatuses++;
8934 else if ( rcPassUp < VINF_EM_FIRST
8935 || rcPassUp > VINF_EM_LAST
8936 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8937 {
8938 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8939 pIemCpu->cRetPassUpStatus++;
8940 rcStrict = rcPassUp;
8941 }
8942 else
8943 {
8944 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8945 pIemCpu->cRetInfStatuses++;
8946 }
8947 }
8948 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8949 pIemCpu->cRetAspectNotImplemented++;
8950 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8951 pIemCpu->cRetInstrNotImplemented++;
8952#ifdef IEM_VERIFICATION_MODE_FULL
8953 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8954 rcStrict = VINF_SUCCESS;
8955#endif
8956 else
8957 pIemCpu->cRetErrStatuses++;
8958 }
8959 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8960 {
8961 pIemCpu->cRetPassUpStatus++;
8962 rcStrict = pIemCpu->rcPassUp;
8963 }
8964
8965 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8966 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8967#if defined(IEM_VERIFICATION_MODE_FULL)
8968 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8969 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8971 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8972#endif
8973 return rcStrict;
8974}
8975
8976
8977#ifdef IN_RC
8978/**
8979 * Re-enters raw-mode or ensure we return to ring-3.
8980 *
8981 * @returns rcStrict, maybe modified.
8982 * @param pIemCpu The IEM CPU structure.
8983 * @param pVCpu The cross context virtual CPU structure of the caller.
8984 * @param pCtx The current CPU context.
8985 * @param rcStrict The status code returned by the interpreter.
8986 */
8987DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
8988{
8989 if (!pIemCpu->fInPatchCode)
8990 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
8991 return rcStrict;
8992}
8993#endif
8994
8995
8996/**
8997 * Execute one instruction.
8998 *
8999 * @return Strict VBox status code.
9000 * @param pVCpu The current virtual CPU.
9001 */
9002VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
9003{
9004 PIEMCPU pIemCpu = &pVCpu->iem.s;
9005
9006#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9007 iemExecVerificationModeSetup(pIemCpu);
9008#endif
9009#ifdef LOG_ENABLED
9010 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9011# ifdef IN_RING3
9012 if (LogIs2Enabled())
9013 {
9014 char szInstr[256];
9015 uint32_t cbInstr = 0;
9016 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9017 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9018 szInstr, sizeof(szInstr), &cbInstr);
9019
9020 Log2(("**** "
9021 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9022 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9023 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9024 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9025 " %s\n"
9026 ,
9027 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9028 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9029 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9030 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9031 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9032 szInstr));
9033
9034 if (LogIs3Enabled())
9035 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9036 }
9037 else
9038# endif
9039 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9040 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9041#endif
9042
9043 /*
9044 * Do the decoding and emulation.
9045 */
9046 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9047 if (rcStrict == VINF_SUCCESS)
9048 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9049
9050#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9051 /*
9052 * Assert some sanity.
9053 */
9054 iemExecVerificationModeCheck(pIemCpu);
9055#endif
9056#ifdef IN_RC
9057 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9058#endif
9059 if (rcStrict != VINF_SUCCESS)
9060 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9061 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9062 return rcStrict;
9063}
9064
9065
9066VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9067{
9068 PIEMCPU pIemCpu = &pVCpu->iem.s;
9069 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9070 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9071
9072 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9073 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9074 if (rcStrict == VINF_SUCCESS)
9075 {
9076 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9077 if (pcbWritten)
9078 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9079 }
9080
9081#ifdef IN_RC
9082 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9083#endif
9084 return rcStrict;
9085}
9086
9087
9088VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9089 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9090{
9091 PIEMCPU pIemCpu = &pVCpu->iem.s;
9092 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9093 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9094
9095 VBOXSTRICTRC rcStrict;
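 /* Only trust the caller-supplied opcode bytes when they were fetched at the
    current RIP; otherwise fall back to the normal prefetch path. */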
9096 if ( cbOpcodeBytes
9097 && pCtx->rip == OpcodeBytesPC)
9098 {
9099 iemInitDecoder(pIemCpu, false);
9100 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9101 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9102 rcStrict = VINF_SUCCESS;
9103 }
9104 else
9105 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9106 if (rcStrict == VINF_SUCCESS)
9107 {
9108 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9109 }
9110
9111#ifdef IN_RC
9112 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9113#endif
9114 return rcStrict;
9115}
9116
9117
9118VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9119{
9120 PIEMCPU pIemCpu = &pVCpu->iem.s;
9121 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9122 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9123
9124 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9125 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9126 if (rcStrict == VINF_SUCCESS)
9127 {
9128 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9129 if (pcbWritten)
9130 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9131 }
9132
9133#ifdef IN_RC
9134 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9135#endif
9136 return rcStrict;
9137}
9138
9139
9140VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9141 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9142{
9143 PIEMCPU pIemCpu = &pVCpu->iem.s;
9144 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9145 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9146
9147 VBOXSTRICTRC rcStrict;
9148 if ( cbOpcodeBytes
9149 && pCtx->rip == OpcodeBytesPC)
9150 {
9151 iemInitDecoder(pIemCpu, true);
9152 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9153 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9154 rcStrict = VINF_SUCCESS;
9155 }
9156 else
9157 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9158 if (rcStrict == VINF_SUCCESS)
9159 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9160
9161#ifdef IN_RC
9162 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9163#endif
9164 return rcStrict;
9165}
9166
9167
9168VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
9169{
9170 PIEMCPU pIemCpu = &pVCpu->iem.s;
9171 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9172
9173 /*
9174 * See if there is an interrupt pending in TRPM and inject it if we can.
9175 */
9176#ifdef IEM_VERIFICATION_MODE_FULL
9177 pIemCpu->uInjectCpl = UINT8_MAX;
9178#endif
9179 if ( pCtx->eflags.Bits.u1IF
9180 && TRPMHasTrap(pVCpu)
9181 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
9182 {
9183 uint8_t u8TrapNo;
9184 TRPMEVENT enmType;
9185 RTGCUINT uErrCode;
9186 RTGCPTR uCr2;
9187 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9188 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
9189 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9190 TRPMResetTrap(pVCpu);
9191 }
9192
9193 /*
9194 * Log the state.
9195 */
9196#ifdef LOG_ENABLED
9197# ifdef IN_RING3
9198 if (LogIs2Enabled())
9199 {
9200 char szInstr[256];
9201 uint32_t cbInstr = 0;
9202 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9203 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9204 szInstr, sizeof(szInstr), &cbInstr);
9205
9206 Log2(("**** "
9207 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9208 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9209 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9210 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9211 " %s\n"
9212 ,
9213 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9214 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9215 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9216 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9217 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9218 szInstr));
9219
9220 if (LogIs3Enabled())
9221 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9222 }
9223 else
9224# endif
9225 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9226 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9227#endif
9228
9229 /*
9230 * Do the decoding and emulation.
9231 */
9232 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9233 if (rcStrict == VINF_SUCCESS)
9234 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9235
9236 /*
9237 * Maybe re-enter raw-mode and log.
9238 */
9239#ifdef IN_RC
9240 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9241#endif
9242 if (rcStrict != VINF_SUCCESS)
9243 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9244 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9245 return rcStrict;
9246}
9247
9248
9249
9250/**
9251 * Injects a trap, fault, abort, software interrupt or external interrupt.
9252 *
9253 * The parameter list matches TRPMQueryTrapAll pretty closely.
9254 *
9255 * @returns Strict VBox status code.
9256 * @param pVCpu The current virtual CPU.
9257 * @param u8TrapNo The trap number.
9258 * @param enmType What type is it (trap/fault/abort), software
9259 * interrupt or hardware interrupt.
9260 * @param uErrCode The error code if applicable.
9261 * @param uCr2 The CR2 value if applicable.
9262 */
9263VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
9264{
9265 iemInitDecoder(&pVCpu->iem.s, false);
9266
9267 uint32_t fFlags;
9268 switch (enmType)
9269 {
9270 case TRPM_HARDWARE_INT:
9271 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9272 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9273 uErrCode = uCr2 = 0;
9274 break;
9275
9276 case TRPM_SOFTWARE_INT:
9277 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9278 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9279 uErrCode = uCr2 = 0;
9280 break;
9281
9282 case TRPM_TRAP:
9283 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9284 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9285 if (u8TrapNo == X86_XCPT_PF)
9286 fFlags |= IEM_XCPT_FLAGS_CR2;
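 /* The vectors below push an error code, so flag that as well. */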
9287 switch (u8TrapNo)
9288 {
9289 case X86_XCPT_DF:
9290 case X86_XCPT_TS:
9291 case X86_XCPT_NP:
9292 case X86_XCPT_SS:
9293 case X86_XCPT_PF:
9294 case X86_XCPT_AC:
9295 fFlags |= IEM_XCPT_FLAGS_ERR;
9296 break;
9297 }
9298 break;
9299
9300 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9301 }
9302
9303 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
9304}
9305
9306
9307VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9308{
9309 return VERR_NOT_IMPLEMENTED;
9310}
9311
9312
9313VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9314{
9315 return VERR_NOT_IMPLEMENTED;
9316}
9317
9318
9319#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
9320/**
9321 * Executes a IRET instruction with default operand size.
9322 *
9323 * This is for PATM.
9324 *
9325 * @returns VBox status code.
9326 * @param pVCpu The current virtual CPU.
9327 * @param pCtxCore The register frame.
9328 */
9329VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
9330{
9331 PIEMCPU pIemCpu = &pVCpu->iem.s;
9332 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9333
9334 iemCtxCoreToCtx(pCtx, pCtxCore);
9335 iemInitDecoder(pIemCpu);
9336 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
9337 if (rcStrict == VINF_SUCCESS)
9338 iemCtxToCtxCore(pCtxCore, pCtx);
9339 else
9340 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9341 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9342 return rcStrict;
9343}
9344#endif
9345