VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@47307

Last change on this file since 47307 was 47307, checked in by vboxsync, 12 years ago

IEM: Implemented mfence, lfence, sfence.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 329.1 KB
1/* $Id: IEMAll.cpp 47307 2013-07-22 14:34:36Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
 21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
 35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
 47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
 48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
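 *      A typical statement follows the usual VBox double-parenthesis logging
 *      convention, e.g. (illustrative values only, not a line from this file):
 *      @code
 *          Log4(("decode - %04x:%RGv - xor eax,eax\n", pCtx->cs.Sel, pCtx->rip));
 *      @endcode
 *      The inner parentheses wrap the format string and its arguments so the
 *      whole statement compiles away in builds without logging.
 *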
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pdm.h>
86#include <VBox/vmm/pgm.h>
87#include <internal/pgm.h>
88#include <VBox/vmm/iom.h>
89#include <VBox/vmm/em.h>
90#include <VBox/vmm/hm.h>
91#include <VBox/vmm/tm.h>
92#include <VBox/vmm/dbgf.h>
93#ifdef VBOX_WITH_RAW_MODE_NOT_R0
94# include <VBox/vmm/patm.h>
95#endif
96#include "IEMInternal.h"
97#ifdef IEM_VERIFICATION_MODE_FULL
98# include <VBox/vmm/rem.h>
99# include <VBox/vmm/mm.h>
100#endif
101#include <VBox/vmm/vm.h>
102#include <VBox/log.h>
103#include <VBox/err.h>
104#include <VBox/param.h>
105#include <iprt/assert.h>
106#include <iprt/string.h>
107#include <iprt/x86.h>
108
109
110/*******************************************************************************
111* Structures and Typedefs *
112*******************************************************************************/
113/** @typedef PFNIEMOP
114 * Pointer to an opcode decoder function.
115 */
116
117/** @def FNIEMOP_DEF
118 * Define an opcode decoder function.
119 *
 120 * We're using macros for this so that adding and removing parameters as well as
 121 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
122 *
123 * @param a_Name The function name.
124 */
125
126
127#if defined(__GNUC__) && defined(RT_ARCH_X86)
128typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
129# define FNIEMOP_DEF(a_Name) \
130 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
131# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
132 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
133# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
134 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
135
136#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
137typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
138# define FNIEMOP_DEF(a_Name) \
139 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
140# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
141 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
142# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
143 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
144
145#elif defined(__GNUC__)
146typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
147# define FNIEMOP_DEF(a_Name) \
148 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
149# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
150 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
151# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
152 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
153
154#else
155typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
156# define FNIEMOP_DEF(a_Name) \
157 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
158# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
159 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
160# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
161 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
162
163#endif
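
/**
 * @par Example
 * Decoder functions are always declared via the FNIEMOP_DEF* macros above so
 * the calling convention and attributes stay in one place.  A minimal sketch
 * of a hypothetical handler (the name and body are illustrative only and do
 * not correspond to an actual instruction implementation in this file):
 * @code
 *  FNIEMOP_DEF(iemOp_example_nop)
 *  {
 *      // A real handler would decode any remaining bytes and advance rIP via
 *      // the register/RIP helpers further down in this file.
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */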
164
165
166/**
167 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
168 */
169typedef union IEMSELDESC
170{
171 /** The legacy view. */
172 X86DESC Legacy;
173 /** The long mode view. */
174 X86DESC64 Long;
175} IEMSELDESC;
176/** Pointer to a selector descriptor table entry. */
177typedef IEMSELDESC *PIEMSELDESC;
178
179
180/*******************************************************************************
181* Defined Constants And Macros *
182*******************************************************************************/
183/** @name IEM status codes.
184 *
185 * Not quite sure how this will play out in the end, just aliasing safe status
186 * codes for now.
187 *
188 * @{ */
189#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
190/** @} */
191
192/** Temporary hack to disable the double execution. Will be removed in favor
193 * of a dedicated execution mode in EM. */
194//#define IEM_VERIFICATION_MODE_NO_REM
195
196/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
197 * due to GCC lacking knowledge about the value range of a switch. */
198#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
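
/**
 * @par Example
 * Typical use is as the default case of a switch over a value that can only
 * take a known set of values, e.g. (illustrative sketch only; cbValue is just
 * a hypothetical local):
 * @code
 *  switch (pIemCpu->enmEffOpSize)
 *  {
 *      case IEMMODE_16BIT: cbValue = 2; break;
 *      case IEMMODE_32BIT: cbValue = 4; break;
 *      case IEMMODE_64BIT: cbValue = 8; break;
 *      IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *  }
 * @endcode
 */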
199
200/**
201 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 202 * occasion.
203 */
204#ifdef LOG_ENABLED
205# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
206 do { \
207 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
208 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
209 } while (0)
210#else
211# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
212 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
213#endif
214
215/**
216 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 217 * occasion using the supplied logger statement.
218 *
219 * @param a_LoggerArgs What to log on failure.
220 */
221#ifdef LOG_ENABLED
222# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
223 do { \
224 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
225 /*LogFunc(a_LoggerArgs);*/ \
226 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
227 } while (0)
228#else
229# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
230 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
231#endif
232
233/**
234 * Call an opcode decoder function.
235 *
 236 * We're using macros for this so that adding and removing parameters can be
237 * done as we please. See FNIEMOP_DEF.
238 */
239#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
240
241/**
242 * Call a common opcode decoder function taking one extra argument.
243 *
 244 * We're using macros for this so that adding and removing parameters can be
245 * done as we please. See FNIEMOP_DEF_1.
246 */
247#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
248
249/**
 250 * Call a common opcode decoder function taking two extra arguments.
 251 *
 252 * We're using macros for this so that adding and removing parameters can be
 253 * done as we please. See FNIEMOP_DEF_2.
254 */
255#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
256
257/**
258 * Check if we're currently executing in real or virtual 8086 mode.
259 *
260 * @returns @c true if it is, @c false if not.
261 * @param a_pIemCpu The IEM state of the current CPU.
262 */
263#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
264
265/**
266 * Check if we're currently executing in long mode.
267 *
268 * @returns @c true if it is, @c false if not.
269 * @param a_pIemCpu The IEM state of the current CPU.
270 */
271#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
272
273/**
274 * Check if we're currently executing in real mode.
275 *
276 * @returns @c true if it is, @c false if not.
277 * @param a_pIemCpu The IEM state of the current CPU.
278 */
279#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
280
281/**
282 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
283 */
284#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
285
286/**
287 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
288 */
289#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
290
291/**
 292 * Tests if at least one of the specified AMD CPUID features (extended) is
 293 * marked present.
294 */
295#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
296
297/**
298 * Checks if an Intel CPUID feature is present.
299 */
300#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
301 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
302 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
303
304/**
305 * Checks if an Intel CPUID feature is present in the host CPU.
306 */
307#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
308 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
309
310/**
311 * Evaluates to true if we're presenting an Intel CPU to the guest.
312 */
313#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
314
315/**
316 * Evaluates to true if we're presenting an AMD CPU to the guest.
317 */
318#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
319
320/**
 321 * Check if the address is canonical, i.e. if bits 63 thru 47 are all equal.
 *      The check works by adding 2^47 and verifying the sum stays below 2^48.
322 */
323#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
324
325
326/*******************************************************************************
327* Global Variables *
328*******************************************************************************/
329extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
330
331
332/** Function table for the ADD instruction. */
333static const IEMOPBINSIZES g_iemAImpl_add =
334{
335 iemAImpl_add_u8, iemAImpl_add_u8_locked,
336 iemAImpl_add_u16, iemAImpl_add_u16_locked,
337 iemAImpl_add_u32, iemAImpl_add_u32_locked,
338 iemAImpl_add_u64, iemAImpl_add_u64_locked
339};
340
341/** Function table for the ADC instruction. */
342static const IEMOPBINSIZES g_iemAImpl_adc =
343{
344 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
345 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
346 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
347 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
348};
349
350/** Function table for the SUB instruction. */
351static const IEMOPBINSIZES g_iemAImpl_sub =
352{
353 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
354 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
355 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
356 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
357};
358
359/** Function table for the SBB instruction. */
360static const IEMOPBINSIZES g_iemAImpl_sbb =
361{
362 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
363 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
364 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
365 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
366};
367
368/** Function table for the OR instruction. */
369static const IEMOPBINSIZES g_iemAImpl_or =
370{
371 iemAImpl_or_u8, iemAImpl_or_u8_locked,
372 iemAImpl_or_u16, iemAImpl_or_u16_locked,
373 iemAImpl_or_u32, iemAImpl_or_u32_locked,
374 iemAImpl_or_u64, iemAImpl_or_u64_locked
375};
376
377/** Function table for the XOR instruction. */
378static const IEMOPBINSIZES g_iemAImpl_xor =
379{
380 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
381 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
382 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
383 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
384};
385
386/** Function table for the AND instruction. */
387static const IEMOPBINSIZES g_iemAImpl_and =
388{
389 iemAImpl_and_u8, iemAImpl_and_u8_locked,
390 iemAImpl_and_u16, iemAImpl_and_u16_locked,
391 iemAImpl_and_u32, iemAImpl_and_u32_locked,
392 iemAImpl_and_u64, iemAImpl_and_u64_locked
393};
394
395/** Function table for the CMP instruction.
396 * @remarks Making operand order ASSUMPTIONS.
397 */
398static const IEMOPBINSIZES g_iemAImpl_cmp =
399{
400 iemAImpl_cmp_u8, NULL,
401 iemAImpl_cmp_u16, NULL,
402 iemAImpl_cmp_u32, NULL,
403 iemAImpl_cmp_u64, NULL
404};
405
406/** Function table for the TEST instruction.
407 * @remarks Making operand order ASSUMPTIONS.
408 */
409static const IEMOPBINSIZES g_iemAImpl_test =
410{
411 iemAImpl_test_u8, NULL,
412 iemAImpl_test_u16, NULL,
413 iemAImpl_test_u32, NULL,
414 iemAImpl_test_u64, NULL
415};
416
417/** Function table for the BT instruction. */
418static const IEMOPBINSIZES g_iemAImpl_bt =
419{
420 NULL, NULL,
421 iemAImpl_bt_u16, NULL,
422 iemAImpl_bt_u32, NULL,
423 iemAImpl_bt_u64, NULL
424};
425
426/** Function table for the BTC instruction. */
427static const IEMOPBINSIZES g_iemAImpl_btc =
428{
429 NULL, NULL,
430 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
431 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
432 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
433};
434
435/** Function table for the BTR instruction. */
436static const IEMOPBINSIZES g_iemAImpl_btr =
437{
438 NULL, NULL,
439 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
440 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
441 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
442};
443
444/** Function table for the BTS instruction. */
445static const IEMOPBINSIZES g_iemAImpl_bts =
446{
447 NULL, NULL,
448 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
449 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
450 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
451};
452
453/** Function table for the BSF instruction. */
454static const IEMOPBINSIZES g_iemAImpl_bsf =
455{
456 NULL, NULL,
457 iemAImpl_bsf_u16, NULL,
458 iemAImpl_bsf_u32, NULL,
459 iemAImpl_bsf_u64, NULL
460};
461
462/** Function table for the BSR instruction. */
463static const IEMOPBINSIZES g_iemAImpl_bsr =
464{
465 NULL, NULL,
466 iemAImpl_bsr_u16, NULL,
467 iemAImpl_bsr_u32, NULL,
468 iemAImpl_bsr_u64, NULL
469};
470
 471/** Function table for the two-operand form of the IMUL instruction. */
472static const IEMOPBINSIZES g_iemAImpl_imul_two =
473{
474 NULL, NULL,
475 iemAImpl_imul_two_u16, NULL,
476 iemAImpl_imul_two_u32, NULL,
477 iemAImpl_imul_two_u64, NULL
478};
479
480/** Group 1 /r lookup table. */
481static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
482{
483 &g_iemAImpl_add,
484 &g_iemAImpl_or,
485 &g_iemAImpl_adc,
486 &g_iemAImpl_sbb,
487 &g_iemAImpl_and,
488 &g_iemAImpl_sub,
489 &g_iemAImpl_xor,
490 &g_iemAImpl_cmp
491};
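
/**
 * @par Example
 * The group 1 table is indexed by the 'reg' field of the ModR/M byte (bits
 * 5:3), which selects between ADD/OR/ADC/SBB/AND/SUB/XOR/CMP for the
 * 0x80..0x83 opcodes.  An illustrative sketch of the lookup (the real decoders
 * live in the instruction table files included elsewhere):
 * @code
 *  PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];  // bRm = ModR/M byte
 *  // The selected table entry then supplies the normal and locked workers
 *  // for each operand size.
 * @endcode
 */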
492
493/** Function table for the INC instruction. */
494static const IEMOPUNARYSIZES g_iemAImpl_inc =
495{
496 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
497 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
498 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
499 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
500};
501
502/** Function table for the DEC instruction. */
503static const IEMOPUNARYSIZES g_iemAImpl_dec =
504{
505 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
506 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
507 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
508 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
509};
510
511/** Function table for the NEG instruction. */
512static const IEMOPUNARYSIZES g_iemAImpl_neg =
513{
514 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
515 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
516 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
517 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
518};
519
520/** Function table for the NOT instruction. */
521static const IEMOPUNARYSIZES g_iemAImpl_not =
522{
523 iemAImpl_not_u8, iemAImpl_not_u8_locked,
524 iemAImpl_not_u16, iemAImpl_not_u16_locked,
525 iemAImpl_not_u32, iemAImpl_not_u32_locked,
526 iemAImpl_not_u64, iemAImpl_not_u64_locked
527};
528
529
530/** Function table for the ROL instruction. */
531static const IEMOPSHIFTSIZES g_iemAImpl_rol =
532{
533 iemAImpl_rol_u8,
534 iemAImpl_rol_u16,
535 iemAImpl_rol_u32,
536 iemAImpl_rol_u64
537};
538
539/** Function table for the ROR instruction. */
540static const IEMOPSHIFTSIZES g_iemAImpl_ror =
541{
542 iemAImpl_ror_u8,
543 iemAImpl_ror_u16,
544 iemAImpl_ror_u32,
545 iemAImpl_ror_u64
546};
547
548/** Function table for the RCL instruction. */
549static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
550{
551 iemAImpl_rcl_u8,
552 iemAImpl_rcl_u16,
553 iemAImpl_rcl_u32,
554 iemAImpl_rcl_u64
555};
556
557/** Function table for the RCR instruction. */
558static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
559{
560 iemAImpl_rcr_u8,
561 iemAImpl_rcr_u16,
562 iemAImpl_rcr_u32,
563 iemAImpl_rcr_u64
564};
565
566/** Function table for the SHL instruction. */
567static const IEMOPSHIFTSIZES g_iemAImpl_shl =
568{
569 iemAImpl_shl_u8,
570 iemAImpl_shl_u16,
571 iemAImpl_shl_u32,
572 iemAImpl_shl_u64
573};
574
575/** Function table for the SHR instruction. */
576static const IEMOPSHIFTSIZES g_iemAImpl_shr =
577{
578 iemAImpl_shr_u8,
579 iemAImpl_shr_u16,
580 iemAImpl_shr_u32,
581 iemAImpl_shr_u64
582};
583
584/** Function table for the SAR instruction. */
585static const IEMOPSHIFTSIZES g_iemAImpl_sar =
586{
587 iemAImpl_sar_u8,
588 iemAImpl_sar_u16,
589 iemAImpl_sar_u32,
590 iemAImpl_sar_u64
591};
592
593
594/** Function table for the MUL instruction. */
595static const IEMOPMULDIVSIZES g_iemAImpl_mul =
596{
597 iemAImpl_mul_u8,
598 iemAImpl_mul_u16,
599 iemAImpl_mul_u32,
600 iemAImpl_mul_u64
601};
602
603/** Function table for the IMUL instruction working implicitly on rAX. */
604static const IEMOPMULDIVSIZES g_iemAImpl_imul =
605{
606 iemAImpl_imul_u8,
607 iemAImpl_imul_u16,
608 iemAImpl_imul_u32,
609 iemAImpl_imul_u64
610};
611
612/** Function table for the DIV instruction. */
613static const IEMOPMULDIVSIZES g_iemAImpl_div =
614{
615 iemAImpl_div_u8,
616 iemAImpl_div_u16,
617 iemAImpl_div_u32,
618 iemAImpl_div_u64
619};
620
 621/** Function table for the IDIV instruction. */
622static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
623{
624 iemAImpl_idiv_u8,
625 iemAImpl_idiv_u16,
626 iemAImpl_idiv_u32,
627 iemAImpl_idiv_u64
628};
629
 630/** Function table for the SHLD instruction. */
631static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
632{
633 iemAImpl_shld_u16,
634 iemAImpl_shld_u32,
635 iemAImpl_shld_u64,
636};
637
 638/** Function table for the SHRD instruction. */
639static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
640{
641 iemAImpl_shrd_u16,
642 iemAImpl_shrd_u32,
643 iemAImpl_shrd_u64,
644};
645
646
647#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
648/** What IEM just wrote. */
649uint8_t g_abIemWrote[256];
650/** How much IEM just wrote. */
651size_t g_cbIemWrote;
652#endif
653
654
655/*******************************************************************************
656* Internal Functions *
657*******************************************************************************/
658static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
659/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
660static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
661static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
662static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
663static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
664static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
665static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
666static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
667static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
668static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
669static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
670static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
671static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
672static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
673static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
674static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
675static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
676static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
677static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
678static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
679static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
680static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
681static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
682
683#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
684static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
685#endif
686static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
687static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
688
689
690/**
691 * Sets the pass up status.
692 *
693 * @returns VINF_SUCCESS.
694 * @param pIemCpu The per CPU IEM state of the calling thread.
695 * @param rcPassUp The pass up status. Must be informational.
696 * VINF_SUCCESS is not allowed.
697 */
698static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
699{
700 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
701
702 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
703 if (rcOldPassUp == VINF_SUCCESS)
704 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
705 /* If both are EM scheduling code, use EM priority rules. */
706 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
707 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
708 {
709 if (rcPassUp < rcOldPassUp)
710 {
711 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
712 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
713 }
714 else
715 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
716 }
717 /* Override EM scheduling with specific status code. */
718 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
719 {
720 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
721 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
722 }
723 /* Don't override specific status code, first come first served. */
724 else
725 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
726 return VINF_SUCCESS;
727}
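
/**
 * @par Example
 * Callers typically use this to remember an informational status while letting
 * the current instruction finish, e.g. (illustrative sketch, assuming a local
 * rcStrict holding an informational status from a PGM/IOM call):
 * @code
 *  if (rcStrict != VINF_SUCCESS)
 *      rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); // now VINF_SUCCESS, original code kept in rcPassUp
 * @endcode
 */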
728
729
730/**
731 * Initializes the decoder state.
732 *
733 * @param pIemCpu The per CPU IEM state.
734 * @param fBypassHandlers Whether to bypass access handlers.
735 */
736DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
737{
738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
739 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
740
741#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
742 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
743 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
744 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
745 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
746 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
747 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
748 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
749 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
750#endif
751
752#ifdef VBOX_WITH_RAW_MODE_NOT_R0
753 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
754#endif
755 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
756#ifdef IEM_VERIFICATION_MODE_FULL
757 if (pIemCpu->uInjectCpl != UINT8_MAX)
758 pIemCpu->uCpl = pIemCpu->uInjectCpl;
759#endif
760 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
761 ? IEMMODE_64BIT
762 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
763 ? IEMMODE_32BIT
764 : IEMMODE_16BIT;
765 pIemCpu->enmCpuMode = enmMode;
766 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
767 pIemCpu->enmEffAddrMode = enmMode;
768 if (enmMode != IEMMODE_64BIT)
769 {
770 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
771 pIemCpu->enmEffOpSize = enmMode;
772 }
773 else
774 {
775 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
776 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
777 }
778 pIemCpu->fPrefixes = 0;
779 pIemCpu->uRexReg = 0;
780 pIemCpu->uRexB = 0;
781 pIemCpu->uRexIndex = 0;
782 pIemCpu->iEffSeg = X86_SREG_DS;
783 pIemCpu->offOpcode = 0;
784 pIemCpu->cbOpcode = 0;
785 pIemCpu->cActiveMappings = 0;
786 pIemCpu->iNextMapping = 0;
787 pIemCpu->rcPassUp = VINF_SUCCESS;
788 pIemCpu->fBypassHandlers = fBypassHandlers;
789#ifdef IN_RC
790 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
791 && pCtx->cs.u64Base == 0
792 && pCtx->cs.u32Limit == UINT32_MAX
793 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
794 if (!pIemCpu->fInPatchCode)
795 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
796#endif
797}
798
799
800/**
 801 * Prefetches the opcodes the first time execution is started.
802 *
803 * @returns Strict VBox status code.
804 * @param pIemCpu The IEM state.
805 * @param fBypassHandlers Whether to bypass access handlers.
806 */
807static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
808{
809#ifdef IEM_VERIFICATION_MODE_FULL
810 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
811#endif
812 iemInitDecoder(pIemCpu, fBypassHandlers);
813
814 /*
815 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
816 *
817 * First translate CS:rIP to a physical address.
818 */
819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
820 uint32_t cbToTryRead;
821 RTGCPTR GCPtrPC;
822 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
823 {
824 cbToTryRead = PAGE_SIZE;
825 GCPtrPC = pCtx->rip;
826 if (!IEM_IS_CANONICAL(GCPtrPC))
827 return iemRaiseGeneralProtectionFault0(pIemCpu);
828 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
829 }
830 else
831 {
832 uint32_t GCPtrPC32 = pCtx->eip;
833 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
834 if (GCPtrPC32 > pCtx->cs.u32Limit)
835 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
836 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
837 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
838 }
839
840#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
841 /* Allow interpretation of patch manager code blocks since they can for
842 instance throw #PFs for perfectly good reasons. */
843 if (pIemCpu->fInPatchCode)
844 {
845 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
846 if (cbToTryRead > cbLeftOnPage)
847 cbToTryRead = cbLeftOnPage;
848 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
849 cbToTryRead = sizeof(pIemCpu->abOpcode);
850 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
851 pIemCpu->cbOpcode = cbToTryRead;
852 return VINF_SUCCESS;
853 }
854#endif
855
856 RTGCPHYS GCPhys;
857 uint64_t fFlags;
858 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
859 if (RT_FAILURE(rc))
860 {
861 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
862 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
863 }
864 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
865 {
866 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
867 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
868 }
869 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
870 {
871 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
872 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
873 }
874 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
875 /** @todo Check reserved bits and such stuff. PGM is better at doing
876 * that, so do it when implementing the guest virtual address
877 * TLB... */
878
879#ifdef IEM_VERIFICATION_MODE_FULL
880 /*
881 * Optimistic optimization: Use unconsumed opcode bytes from the previous
882 * instruction.
883 */
884 /** @todo optimize this differently by not using PGMPhysRead. */
885 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
886 pIemCpu->GCPhysOpcodes = GCPhys;
887 if ( offPrevOpcodes < cbOldOpcodes
888 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
889 {
890 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
891 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
892 pIemCpu->cbOpcode = cbNew;
893 return VINF_SUCCESS;
894 }
895#endif
896
897 /*
898 * Read the bytes at this address.
899 */
900 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
901 if (cbToTryRead > cbLeftOnPage)
902 cbToTryRead = cbLeftOnPage;
903 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
904 cbToTryRead = sizeof(pIemCpu->abOpcode);
905 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
906 * doing that. */
907 if (!pIemCpu->fBypassHandlers)
908 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
909 else
910 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
911 if (rc != VINF_SUCCESS)
912 {
913 /** @todo status code handling */
914 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
915 GCPtrPC, GCPhys, rc, cbToTryRead));
916 return rc;
917 }
918 pIemCpu->cbOpcode = cbToTryRead;
919
920 return VINF_SUCCESS;
921}
922
923
924/**
 925 * Tries to fetch at least @a cbMin additional opcode bytes, raising the
 926 * appropriate exception on failure.
927 *
928 * @returns Strict VBox status code.
929 * @param pIemCpu The IEM state.
 930 * @param cbMin The minimum number of additional opcode bytes needed.
931 */
932static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
933{
934 /*
935 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
936 *
937 * First translate CS:rIP to a physical address.
938 */
939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
940 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
941 uint32_t cbToTryRead;
942 RTGCPTR GCPtrNext;
943 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
944 {
945 cbToTryRead = PAGE_SIZE;
946 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
947 if (!IEM_IS_CANONICAL(GCPtrNext))
948 return iemRaiseGeneralProtectionFault0(pIemCpu);
949 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
950 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
951 }
952 else
953 {
954 uint32_t GCPtrNext32 = pCtx->eip;
955 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
956 GCPtrNext32 += pIemCpu->cbOpcode;
957 if (GCPtrNext32 > pCtx->cs.u32Limit)
958 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
959 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
960 if (cbToTryRead < cbMin - cbLeft)
961 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
962 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
963 }
964
965 RTGCPHYS GCPhys;
966 uint64_t fFlags;
967 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
968 if (RT_FAILURE(rc))
969 {
970 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
971 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
972 }
973 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
974 {
975 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
976 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
977 }
978 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
979 {
980 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
981 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
982 }
983 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
984 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
985 /** @todo Check reserved bits and such stuff. PGM is better at doing
986 * that, so do it when implementing the guest virtual address
987 * TLB... */
988
989 /*
990 * Read the bytes at this address.
991 */
992 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
993 if (cbToTryRead > cbLeftOnPage)
994 cbToTryRead = cbLeftOnPage;
995 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
996 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
997 Assert(cbToTryRead >= cbMin - cbLeft);
998 if (!pIemCpu->fBypassHandlers)
999 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1000 else
1001 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1002 if (rc != VINF_SUCCESS)
1003 {
1004 /** @todo status code handling */
1005 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1006 return rc;
1007 }
1008 pIemCpu->cbOpcode += cbToTryRead;
1009 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1010
1011 return VINF_SUCCESS;
1012}
1013
1014
1015/**
1016 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1017 *
1018 * @returns Strict VBox status code.
1019 * @param pIemCpu The IEM state.
1020 * @param pb Where to return the opcode byte.
1021 */
1022DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1023{
1024 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1025 if (rcStrict == VINF_SUCCESS)
1026 {
1027 uint8_t offOpcode = pIemCpu->offOpcode;
1028 *pb = pIemCpu->abOpcode[offOpcode];
1029 pIemCpu->offOpcode = offOpcode + 1;
1030 }
1031 else
1032 *pb = 0;
1033 return rcStrict;
1034}
1035
1036
1037/**
1038 * Fetches the next opcode byte.
1039 *
1040 * @returns Strict VBox status code.
1041 * @param pIemCpu The IEM state.
1042 * @param pu8 Where to return the opcode byte.
1043 */
1044DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1045{
1046 uint8_t const offOpcode = pIemCpu->offOpcode;
1047 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1048 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1049
1050 *pu8 = pIemCpu->abOpcode[offOpcode];
1051 pIemCpu->offOpcode = offOpcode + 1;
1052 return VINF_SUCCESS;
1053}
1054
1055
1056/**
1057 * Fetches the next opcode byte, returns automatically on failure.
1058 *
1059 * @param a_pu8 Where to return the opcode byte.
1060 * @remark Implicitly references pIemCpu.
1061 */
1062#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1063 do \
1064 { \
1065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1066 if (rcStrict2 != VINF_SUCCESS) \
1067 return rcStrict2; \
1068 } while (0)
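
/**
 * @par Example
 * The IEM_OPCODE_GET_NEXT_* macros keep decoder code compact; fetching e.g.
 * the ModR/M byte in a decoder function becomes a single line (illustrative
 * sketch only):
 * @code
 *  uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 * @endcode
 * The inline fetcher serves bytes from the opcode buffer and only falls back
 * to the out-of-line iemOpcodeGetNextU8Slow helper when the buffer has to be
 * refilled, keeping the common path cheap.
 */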
1069
1070
1071/**
1072 * Fetches the next signed byte from the opcode stream.
1073 *
1074 * @returns Strict VBox status code.
1075 * @param pIemCpu The IEM state.
1076 * @param pi8 Where to return the signed byte.
1077 */
1078DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1079{
1080 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1081}
1082
1083
1084/**
1085 * Fetches the next signed byte from the opcode stream, returning automatically
1086 * on failure.
1087 *
1088 * @param pi8 Where to return the signed byte.
1089 * @remark Implicitly references pIemCpu.
1090 */
1091#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1092 do \
1093 { \
1094 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1095 if (rcStrict2 != VINF_SUCCESS) \
1096 return rcStrict2; \
1097 } while (0)
1098
1099
1100/**
1101 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1102 *
1103 * @returns Strict VBox status code.
1104 * @param pIemCpu The IEM state.
 1105 * @param pu16 Where to return the opcode word.
1106 */
1107DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1108{
1109 uint8_t u8;
1110 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1111 if (rcStrict == VINF_SUCCESS)
1112 *pu16 = (int8_t)u8;
1113 return rcStrict;
1114}
1115
1116
1117/**
1118 * Fetches the next signed byte from the opcode stream, extending it to
1119 * unsigned 16-bit.
1120 *
1121 * @returns Strict VBox status code.
1122 * @param pIemCpu The IEM state.
1123 * @param pu16 Where to return the unsigned word.
1124 */
1125DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1126{
1127 uint8_t const offOpcode = pIemCpu->offOpcode;
1128 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1129 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1130
1131 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1132 pIemCpu->offOpcode = offOpcode + 1;
1133 return VINF_SUCCESS;
1134}
1135
1136
1137/**
 1138 * Fetches the next signed byte from the opcode stream and sign-extends it to
 1139 * a word, returning automatically on failure.
1140 *
1141 * @param pu16 Where to return the word.
1142 * @remark Implicitly references pIemCpu.
1143 */
1144#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1145 do \
1146 { \
1147 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1148 if (rcStrict2 != VINF_SUCCESS) \
1149 return rcStrict2; \
1150 } while (0)
1151
1152
1153/**
1154 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1155 *
1156 * @returns Strict VBox status code.
1157 * @param pIemCpu The IEM state.
1158 * @param pu32 Where to return the opcode dword.
1159 */
1160DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1161{
1162 uint8_t u8;
1163 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1164 if (rcStrict == VINF_SUCCESS)
1165 *pu32 = (int8_t)u8;
1166 return rcStrict;
1167}
1168
1169
1170/**
1171 * Fetches the next signed byte from the opcode stream, extending it to
1172 * unsigned 32-bit.
1173 *
1174 * @returns Strict VBox status code.
1175 * @param pIemCpu The IEM state.
1176 * @param pu32 Where to return the unsigned dword.
1177 */
1178DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1179{
1180 uint8_t const offOpcode = pIemCpu->offOpcode;
1181 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1182 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1183
1184 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1185 pIemCpu->offOpcode = offOpcode + 1;
1186 return VINF_SUCCESS;
1187}
1188
1189
1190/**
 1191 * Fetches the next signed byte from the opcode stream and sign-extends it to
 1192 * a double word, returning automatically on failure.
 1193 *
 1194 * @param pu32 Where to return the double word.
1195 * @remark Implicitly references pIemCpu.
1196 */
1197#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1198 do \
1199 { \
1200 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1201 if (rcStrict2 != VINF_SUCCESS) \
1202 return rcStrict2; \
1203 } while (0)
1204
1205
1206/**
1207 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1208 *
1209 * @returns Strict VBox status code.
1210 * @param pIemCpu The IEM state.
1211 * @param pu64 Where to return the opcode qword.
1212 */
1213DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1214{
1215 uint8_t u8;
1216 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1217 if (rcStrict == VINF_SUCCESS)
1218 *pu64 = (int8_t)u8;
1219 return rcStrict;
1220}
1221
1222
1223/**
1224 * Fetches the next signed byte from the opcode stream, extending it to
1225 * unsigned 64-bit.
1226 *
1227 * @returns Strict VBox status code.
1228 * @param pIemCpu The IEM state.
1229 * @param pu64 Where to return the unsigned qword.
1230 */
1231DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1232{
1233 uint8_t const offOpcode = pIemCpu->offOpcode;
1234 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1235 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1236
1237 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1238 pIemCpu->offOpcode = offOpcode + 1;
1239 return VINF_SUCCESS;
1240}
1241
1242
1243/**
 1244 * Fetches the next signed byte from the opcode stream and sign-extends it to
 1245 * a quad word, returning automatically on failure.
 1246 *
 1247 * @param pu64 Where to return the quad word.
1248 * @remark Implicitly references pIemCpu.
1249 */
1250#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1251 do \
1252 { \
1253 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1254 if (rcStrict2 != VINF_SUCCESS) \
1255 return rcStrict2; \
1256 } while (0)
1257
1258
1259/**
1260 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1261 *
1262 * @returns Strict VBox status code.
1263 * @param pIemCpu The IEM state.
1264 * @param pu16 Where to return the opcode word.
1265 */
1266DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1267{
1268 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1269 if (rcStrict == VINF_SUCCESS)
1270 {
1271 uint8_t offOpcode = pIemCpu->offOpcode;
1272 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1273 pIemCpu->offOpcode = offOpcode + 2;
1274 }
1275 else
1276 *pu16 = 0;
1277 return rcStrict;
1278}
1279
1280
1281/**
1282 * Fetches the next opcode word.
1283 *
1284 * @returns Strict VBox status code.
1285 * @param pIemCpu The IEM state.
1286 * @param pu16 Where to return the opcode word.
1287 */
1288DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1289{
1290 uint8_t const offOpcode = pIemCpu->offOpcode;
1291 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1292 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1293
1294 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1295 pIemCpu->offOpcode = offOpcode + 2;
1296 return VINF_SUCCESS;
1297}
1298
1299
1300/**
1301 * Fetches the next opcode word, returns automatically on failure.
1302 *
1303 * @param a_pu16 Where to return the opcode word.
1304 * @remark Implicitly references pIemCpu.
1305 */
1306#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1307 do \
1308 { \
1309 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1310 if (rcStrict2 != VINF_SUCCESS) \
1311 return rcStrict2; \
1312 } while (0)
1313
1314
1315/**
1316 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1317 *
1318 * @returns Strict VBox status code.
1319 * @param pIemCpu The IEM state.
1320 * @param pu32 Where to return the opcode double word.
1321 */
1322DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1323{
1324 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1325 if (rcStrict == VINF_SUCCESS)
1326 {
1327 uint8_t offOpcode = pIemCpu->offOpcode;
1328 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1329 pIemCpu->offOpcode = offOpcode + 2;
1330 }
1331 else
1332 *pu32 = 0;
1333 return rcStrict;
1334}
1335
1336
1337/**
1338 * Fetches the next opcode word, zero extending it to a double word.
1339 *
1340 * @returns Strict VBox status code.
1341 * @param pIemCpu The IEM state.
1342 * @param pu32 Where to return the opcode double word.
1343 */
1344DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1345{
1346 uint8_t const offOpcode = pIemCpu->offOpcode;
1347 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1348 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1349
1350 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1351 pIemCpu->offOpcode = offOpcode + 2;
1352 return VINF_SUCCESS;
1353}
1354
1355
1356/**
1357 * Fetches the next opcode word and zero extends it to a double word, returns
1358 * automatically on failure.
1359 *
1360 * @param a_pu32 Where to return the opcode double word.
1361 * @remark Implicitly references pIemCpu.
1362 */
1363#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1364 do \
1365 { \
1366 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1367 if (rcStrict2 != VINF_SUCCESS) \
1368 return rcStrict2; \
1369 } while (0)
1370
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pIemCpu The IEM state.
1377 * @param pu64 Where to return the opcode quad word.
1378 */
1379DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1380{
1381 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1382 if (rcStrict == VINF_SUCCESS)
1383 {
1384 uint8_t offOpcode = pIemCpu->offOpcode;
1385 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1386 pIemCpu->offOpcode = offOpcode + 2;
1387 }
1388 else
1389 *pu64 = 0;
1390 return rcStrict;
1391}
1392
1393
1394/**
1395 * Fetches the next opcode word, zero extending it to a quad word.
1396 *
1397 * @returns Strict VBox status code.
1398 * @param pIemCpu The IEM state.
1399 * @param pu64 Where to return the opcode quad word.
1400 */
1401DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1402{
1403 uint8_t const offOpcode = pIemCpu->offOpcode;
1404 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1405 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1406
1407 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1408 pIemCpu->offOpcode = offOpcode + 2;
1409 return VINF_SUCCESS;
1410}
1411
1412
1413/**
1414 * Fetches the next opcode word and zero extends it to a quad word, returns
1415 * automatically on failure.
1416 *
1417 * @param a_pu64 Where to return the opcode quad word.
1418 * @remark Implicitly references pIemCpu.
1419 */
1420#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1421 do \
1422 { \
1423 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1424 if (rcStrict2 != VINF_SUCCESS) \
1425 return rcStrict2; \
1426 } while (0)
1427
1428
1429/**
1430 * Fetches the next signed word from the opcode stream.
1431 *
1432 * @returns Strict VBox status code.
1433 * @param pIemCpu The IEM state.
1434 * @param pi16 Where to return the signed word.
1435 */
1436DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1437{
1438 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1439}
1440
1441
1442/**
1443 * Fetches the next signed word from the opcode stream, returning automatically
1444 * on failure.
1445 *
1446 * @param pi16 Where to return the signed word.
1447 * @remark Implicitly references pIemCpu.
1448 */
1449#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1450 do \
1451 { \
1452 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1453 if (rcStrict2 != VINF_SUCCESS) \
1454 return rcStrict2; \
1455 } while (0)
1456
1457
1458/**
1459 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pIemCpu The IEM state.
1463 * @param pu32 Where to return the opcode dword.
1464 */
1465DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1466{
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pIemCpu->offOpcode;
1471 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1472 pIemCpu->abOpcode[offOpcode + 1],
1473 pIemCpu->abOpcode[offOpcode + 2],
1474 pIemCpu->abOpcode[offOpcode + 3]);
1475 pIemCpu->offOpcode = offOpcode + 4;
1476 }
1477 else
1478 *pu32 = 0;
1479 return rcStrict;
1480}
1481
1482
1483/**
1484 * Fetches the next opcode dword.
1485 *
1486 * @returns Strict VBox status code.
1487 * @param pIemCpu The IEM state.
1488 * @param pu32 Where to return the opcode double word.
1489 */
1490DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1491{
1492 uint8_t const offOpcode = pIemCpu->offOpcode;
1493 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1494 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1495
1496 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1497 pIemCpu->abOpcode[offOpcode + 1],
1498 pIemCpu->abOpcode[offOpcode + 2],
1499 pIemCpu->abOpcode[offOpcode + 3]);
1500 pIemCpu->offOpcode = offOpcode + 4;
1501 return VINF_SUCCESS;
1502}
1503
1504
1505/**
1506 * Fetches the next opcode dword, returns automatically on failure.
1507 *
1508 * @param a_pu32 Where to return the opcode dword.
1509 * @remark Implicitly references pIemCpu.
1510 */
1511#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1512 do \
1513 { \
1514 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1515 if (rcStrict2 != VINF_SUCCESS) \
1516 return rcStrict2; \
1517 } while (0)
1518
1519
1520/**
1521 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1522 *
1523 * @returns Strict VBox status code.
1524 * @param pIemCpu The IEM state.
 1525 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
1526 */
1527DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1528{
1529 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1530 if (rcStrict == VINF_SUCCESS)
1531 {
1532 uint8_t offOpcode = pIemCpu->offOpcode;
1533 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1534 pIemCpu->abOpcode[offOpcode + 1],
1535 pIemCpu->abOpcode[offOpcode + 2],
1536 pIemCpu->abOpcode[offOpcode + 3]);
1537 pIemCpu->offOpcode = offOpcode + 4;
1538 }
1539 else
1540 *pu64 = 0;
1541 return rcStrict;
1542}
1543
1544
1545/**
1546 * Fetches the next opcode dword, zero extending it to a quad word.
1547 *
1548 * @returns Strict VBox status code.
1549 * @param pIemCpu The IEM state.
1550 * @param pu64 Where to return the opcode quad word.
1551 */
1552DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1553{
1554 uint8_t const offOpcode = pIemCpu->offOpcode;
1555 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1556 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1557
1558 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1559 pIemCpu->abOpcode[offOpcode + 1],
1560 pIemCpu->abOpcode[offOpcode + 2],
1561 pIemCpu->abOpcode[offOpcode + 3]);
1562 pIemCpu->offOpcode = offOpcode + 4;
1563 return VINF_SUCCESS;
1564}
1565
1566
1567/**
1568 * Fetches the next opcode dword and zero extends it to a quad word, returns
1569 * automatically on failure.
1570 *
1571 * @param a_pu64 Where to return the opcode quad word.
1572 * @remark Implicitly references pIemCpu.
1573 */
1574#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1575 do \
1576 { \
1577 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1578 if (rcStrict2 != VINF_SUCCESS) \
1579 return rcStrict2; \
1580 } while (0)
1581
1582
1583/**
1584 * Fetches the next signed double word from the opcode stream.
1585 *
1586 * @returns Strict VBox status code.
1587 * @param pIemCpu The IEM state.
1588 * @param pi32 Where to return the signed double word.
1589 */
1590DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1591{
1592 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1593}
1594
1595/**
1596 * Fetches the next signed double word from the opcode stream, returning
1597 * automatically on failure.
1598 *
1599 * @param pi32 Where to return the signed double word.
1600 * @remark Implicitly references pIemCpu.
1601 */
1602#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1603 do \
1604 { \
1605 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1606 if (rcStrict2 != VINF_SUCCESS) \
1607 return rcStrict2; \
1608 } while (0)
1609
1610
1611/**
1612 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1613 *
1614 * @returns Strict VBox status code.
1615 * @param pIemCpu The IEM state.
1616 * @param pu64 Where to return the opcode qword.
1617 */
1618DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1619{
1620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1621 if (rcStrict == VINF_SUCCESS)
1622 {
1623 uint8_t offOpcode = pIemCpu->offOpcode;
1624 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1625 pIemCpu->abOpcode[offOpcode + 1],
1626 pIemCpu->abOpcode[offOpcode + 2],
1627 pIemCpu->abOpcode[offOpcode + 3]);
1628 pIemCpu->offOpcode = offOpcode + 4;
1629 }
1630 else
1631 *pu64 = 0;
1632 return rcStrict;
1633}
1634
1635
1636/**
1637 * Fetches the next opcode dword, sign extending it into a quad word.
1638 *
1639 * @returns Strict VBox status code.
1640 * @param pIemCpu The IEM state.
1641 * @param pu64 Where to return the opcode quad word.
1642 */
1643DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1644{
1645 uint8_t const offOpcode = pIemCpu->offOpcode;
1646 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1647 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1648
1649 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1650 pIemCpu->abOpcode[offOpcode + 1],
1651 pIemCpu->abOpcode[offOpcode + 2],
1652 pIemCpu->abOpcode[offOpcode + 3]);
1653 *pu64 = i32;
1654 pIemCpu->offOpcode = offOpcode + 4;
1655 return VINF_SUCCESS;
1656}
1657
1658
1659/**
1660 * Fetches the next opcode double word and sign extends it to a quad word,
1661 * returns automatically on failure.
1662 *
1663 * @param a_pu64 Where to return the opcode quad word.
1664 * @remark Implicitly references pIemCpu.
1665 */
1666#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1667 do \
1668 { \
1669 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1670 if (rcStrict2 != VINF_SUCCESS) \
1671 return rcStrict2; \
1672 } while (0)
1673
1674
1675/**
1676 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1677 *
1678 * @returns Strict VBox status code.
1679 * @param pIemCpu The IEM state.
1680 * @param pu64 Where to return the opcode qword.
1681 */
1682DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1683{
1684 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1685 if (rcStrict == VINF_SUCCESS)
1686 {
1687 uint8_t offOpcode = pIemCpu->offOpcode;
1688 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1689 pIemCpu->abOpcode[offOpcode + 1],
1690 pIemCpu->abOpcode[offOpcode + 2],
1691 pIemCpu->abOpcode[offOpcode + 3],
1692 pIemCpu->abOpcode[offOpcode + 4],
1693 pIemCpu->abOpcode[offOpcode + 5],
1694 pIemCpu->abOpcode[offOpcode + 6],
1695 pIemCpu->abOpcode[offOpcode + 7]);
1696 pIemCpu->offOpcode = offOpcode + 8;
1697 }
1698 else
1699 *pu64 = 0;
1700 return rcStrict;
1701}
1702
1703
1704/**
1705 * Fetches the next opcode qword.
1706 *
1707 * @returns Strict VBox status code.
1708 * @param pIemCpu The IEM state.
1709 * @param pu64 Where to return the opcode qword.
1710 */
1711DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1712{
1713 uint8_t const offOpcode = pIemCpu->offOpcode;
1714 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1715 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1716
1717 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1718 pIemCpu->abOpcode[offOpcode + 1],
1719 pIemCpu->abOpcode[offOpcode + 2],
1720 pIemCpu->abOpcode[offOpcode + 3],
1721 pIemCpu->abOpcode[offOpcode + 4],
1722 pIemCpu->abOpcode[offOpcode + 5],
1723 pIemCpu->abOpcode[offOpcode + 6],
1724 pIemCpu->abOpcode[offOpcode + 7]);
1725 pIemCpu->offOpcode = offOpcode + 8;
1726 return VINF_SUCCESS;
1727}
1728
1729
1730/**
1731 * Fetches the next opcode quad word, returns automatically on failure.
1732 *
1733 * @param a_pu64 Where to return the opcode quad word.
1734 * @remark Implicitly references pIemCpu.
1735 */
1736#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1737 do \
1738 { \
1739 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1740 if (rcStrict2 != VINF_SUCCESS) \
1741 return rcStrict2; \
1742 } while (0)
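/* Illustrative usage sketch (not part of the original source; the handler and
 * immediate names are hypothetical): a typical opcode decoder strings these
 * fetch macros together, relying on each one to return from the calling
 * function when the opcode bytes cannot be fetched.
 *
 *      FNIEMOP_DEF(iemOp_ExampleWithQwordImm)
 *      {
 *          uint64_t u64Imm;
 *          IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // bails out with rcStrict2 on failure
 *          ...
 *      }
 */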
1743
1744
1745/** @name Misc Worker Functions.
1746 * @{
1747 */
1748
1749
1750/**
1751 * Validates a new SS segment.
1752 *
1753 * @returns VBox strict status code.
1754 * @param pIemCpu The IEM per CPU instance data.
1755 * @param pCtx The CPU context.
1756 * @param NewSS The new SS selector.
1757 * @param uCpl The CPL to load the stack for.
1758 * @param pDesc Where to return the descriptor.
1759 */
1760static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1761{
1762 NOREF(pCtx);
1763
1764 /* Null selectors are not allowed (we're not called for dispatching
1765 interrupts with SS=0 in long mode). */
1766 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1767 {
1768 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1769 return iemRaiseGeneralProtectionFault0(pIemCpu);
1770 }
1771
1772 /*
1773 * Read the descriptor.
1774 */
1775 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1776 if (rcStrict != VINF_SUCCESS)
1777 return rcStrict;
1778
1779 /*
1780 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1781 */
1782 if (!pDesc->Legacy.Gen.u1DescType)
1783 {
1784 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1785 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1786 }
1787
1788 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1789 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1790 {
1791 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1792 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1793 }
1794 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1795 if ((NewSS & X86_SEL_RPL) != uCpl)
1796 {
1797 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1798 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1799 }
1800 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1801 {
1802 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1803 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1804 }
1805
1806 /* Is it there? */
1807 /** @todo testcase: Is this checked before the canonical / limit check below? */
1808 if (!pDesc->Legacy.Gen.u1Present)
1809 {
1810 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1811 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1812 }
1813
1814 return VINF_SUCCESS;
1815}
1816
1817
1818/**
1819 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1820 * not.
1821 *
1822 * @param a_pIemCpu The IEM per CPU data.
1823 * @param a_pCtx The CPU context.
1824 */
1825#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1826# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1827 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1828 ? (a_pCtx)->eflags.u \
1829 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1830#else
1831# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1832 ( (a_pCtx)->eflags.u )
1833#endif
1834
1835/**
1836 * Updates the EFLAGS in the correct manner wrt. PATM.
1837 *
1838 * @param a_pIemCpu The IEM per CPU data.
1839 * @param a_pCtx The CPU context.
1840 */
1841#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1842# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1843 do { \
1844 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1845 (a_pCtx)->eflags.u = (a_fEfl); \
1846 else \
1847 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1848 } while (0)
1849#else
1850# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1851 do { \
1852 (a_pCtx)->eflags.u = (a_fEfl); \
1853 } while (0)
1854#endif
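/* Illustrative read-modify-write pattern (sketch; this is the idiom used by
 * the exception dispatchers below): always go through these macros so that
 * raw-mode/PATM gets to decide where the flags actually live.
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */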
1855
1856
1857/** @} */
1858
1859/** @name Raising Exceptions.
1860 *
1861 * @{
1862 */
1863
1864/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1865 * @{ */
1866/** CPU exception. */
1867#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1868/** External interrupt (from PIC, APIC, whatever). */
1869#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1870/** Software interrupt (int or into, not bound).
1871 * Returns to the following instruction. */
1872#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1873/** Takes an error code. */
1874#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1875/** Takes a CR2. */
1876#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1877/** Generated by the breakpoint instruction. */
1878#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1879/** @} */
1880
1881
1882/**
1883 * Loads the specified stack far pointer from the TSS.
1884 *
1885 * @returns VBox strict status code.
1886 * @param pIemCpu The IEM per CPU instance data.
1887 * @param pCtx The CPU context.
1888 * @param uCpl The CPL to load the stack for.
1889 * @param pSelSS Where to return the new stack segment.
1890 * @param puEsp Where to return the new stack pointer.
1891 */
1892static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1893 PRTSEL pSelSS, uint32_t *puEsp)
1894{
1895 VBOXSTRICTRC rcStrict;
1896 Assert(uCpl < 4);
1897 *puEsp = 0; /* make gcc happy */
1898 *pSelSS = 0; /* make gcc happy */
1899
1900 switch (pCtx->tr.Attr.n.u4Type)
1901 {
1902 /*
1903 * 16-bit TSS (X86TSS16).
1904 */
1905 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1906 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1907 {
1908 uint32_t off = uCpl * 4 + 2;
1909 if (off + 4 > pCtx->tr.u32Limit)
1910 {
1911 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1912 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1913 }
1914
1915 uint32_t u32Tmp = 0; /* gcc maybe... */
1916 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1917 if (rcStrict == VINF_SUCCESS)
1918 {
1919 *puEsp = RT_LOWORD(u32Tmp);
1920 *pSelSS = RT_HIWORD(u32Tmp);
1921 return VINF_SUCCESS;
1922 }
1923 break;
1924 }
1925
1926 /*
1927 * 32-bit TSS (X86TSS32).
1928 */
1929 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1930 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1931 {
1932 uint32_t off = uCpl * 8 + 4;
1933 if (off + 7 > pCtx->tr.u32Limit)
1934 {
1935 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1936 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1937 }
1938
1939 uint64_t u64Tmp;
1940 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1941 if (rcStrict == VINF_SUCCESS)
1942 {
1943 *puEsp = u64Tmp & UINT32_MAX;
1944 *pSelSS = (RTSEL)(u64Tmp >> 32);
1945 return VINF_SUCCESS;
1946 }
1947 break;
1948 }
1949
1950 default:
1951 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1952 }
1953 return rcStrict;
1954}
1955
1956
1957/**
1958 * Loads the specified stack pointer from the 64-bit TSS.
1959 *
1960 * @returns VBox strict status code.
1961 * @param pIemCpu The IEM per CPU instance data.
1962 * @param pCtx The CPU context.
1963 * @param uCpl The CPL to load the stack for.
1964 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1965 * @param puRsp Where to return the new stack pointer.
1966 */
1967static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
1968 uint64_t *puRsp)
1969{
1970 Assert(uCpl < 4);
1971 Assert(uIst < 8);
1972 *puRsp = 0; /* make gcc happy */
1973
1974 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
1975
1976 uint32_t off;
1977 if (uIst)
1978 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
1979 else
1980 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
1981 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
1982 {
1983 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
1984 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1985 }
1986
1987 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
1988}
1989
1990
1991/**
1992 * Adjust the CPU state according to the exception being raised.
1993 *
1994 * @param pCtx The CPU context.
1995 * @param u8Vector The exception that has been raised.
1996 */
1997DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1998{
1999 switch (u8Vector)
2000 {
2001 case X86_XCPT_DB:
2002 pCtx->dr[7] &= ~X86_DR7_GD;
2003 break;
2004 /** @todo Read the AMD and Intel exception reference... */
2005 }
2006}
2007
2008
2009/**
2010 * Implements exceptions and interrupts for real mode.
2011 *
2012 * @returns VBox strict status code.
2013 * @param pIemCpu The IEM per CPU instance data.
2014 * @param pCtx The CPU context.
2015 * @param cbInstr The number of bytes to offset rIP by in the return
2016 * address.
2017 * @param u8Vector The interrupt / exception vector number.
2018 * @param fFlags The flags.
2019 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2020 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2021 */
2022static VBOXSTRICTRC
2023iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2024 PCPUMCTX pCtx,
2025 uint8_t cbInstr,
2026 uint8_t u8Vector,
2027 uint32_t fFlags,
2028 uint16_t uErr,
2029 uint64_t uCr2)
2030{
2031 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2032 NOREF(uErr); NOREF(uCr2);
2033
2034 /*
2035 * Read the IDT entry.
2036 */
2037 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2038 {
2039 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2040 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2041 }
2042 RTFAR16 Idte;
2043 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2044 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2045 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2046 return rcStrict;
2047
2048 /*
2049 * Push the stack frame.
2050 */
2051 uint16_t *pu16Frame;
2052 uint64_t uNewRsp;
2053 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2054 if (rcStrict != VINF_SUCCESS)
2055 return rcStrict;
2056
2057 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2058 pu16Frame[2] = (uint16_t)fEfl;
2059 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2060 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2061 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2062 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2063 return rcStrict;
2064
2065 /*
2066 * Load the vector address into cs:ip and make exception specific state
2067 * adjustments.
2068 */
2069 pCtx->cs.Sel = Idte.sel;
2070 pCtx->cs.ValidSel = Idte.sel;
2071 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2072 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2073 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2074 pCtx->rip = Idte.off;
2075 fEfl &= ~X86_EFL_IF;
2076 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2077
2078 /** @todo do we actually do this in real mode? */
2079 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2080 iemRaiseXcptAdjustState(pCtx, u8Vector);
2081
2082 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2083}
2084
2085
2086/**
2087 * Implements exceptions and interrupts for protected mode.
2088 *
2089 * @returns VBox strict status code.
2090 * @param pIemCpu The IEM per CPU instance data.
2091 * @param pCtx The CPU context.
2092 * @param cbInstr The number of bytes to offset rIP by in the return
2093 * address.
2094 * @param u8Vector The interrupt / exception vector number.
2095 * @param fFlags The flags.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 */
2099static VBOXSTRICTRC
2100iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2101 PCPUMCTX pCtx,
2102 uint8_t cbInstr,
2103 uint8_t u8Vector,
2104 uint32_t fFlags,
2105 uint16_t uErr,
2106 uint64_t uCr2)
2107{
2108 NOREF(cbInstr);
2109
2110 /*
2111 * Read the IDT entry.
2112 */
2113 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2114 {
2115 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2116 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2117 }
2118 X86DESC Idte;
2119 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2120 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2121 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2122 return rcStrict;
2123 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2124 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2125 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2126
2127 /*
2128 * Check the descriptor type, DPL and such.
2129 * ASSUMES this is done in the same order as described for call-gate calls.
2130 */
2131 if (Idte.Gate.u1DescType)
2132 {
2133 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2134 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2135 }
2136 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2137 switch (Idte.Gate.u4Type)
2138 {
2139 case X86_SEL_TYPE_SYS_UNDEFINED:
2140 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2141 case X86_SEL_TYPE_SYS_LDT:
2142 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2143 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2144 case X86_SEL_TYPE_SYS_UNDEFINED2:
2145 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2146 case X86_SEL_TYPE_SYS_UNDEFINED3:
2147 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2148 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2149 case X86_SEL_TYPE_SYS_UNDEFINED4:
2150 {
2151 /** @todo check what actually happens when the type is wrong...
2152 * esp. call gates. */
2153 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2154 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2155 }
2156
2157 case X86_SEL_TYPE_SYS_286_INT_GATE:
2158 case X86_SEL_TYPE_SYS_386_INT_GATE:
2159 fEflToClear |= X86_EFL_IF;
2160 break;
2161
2162 case X86_SEL_TYPE_SYS_TASK_GATE:
2163 /** @todo task gates. */
2164 AssertFailedReturn(VERR_NOT_SUPPORTED);
2165
2166 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2167 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2168 break;
2169
2170 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2171 }
2172
2173 /* Check DPL against CPL if applicable. */
2174 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2175 {
2176 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2177 {
2178 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2179 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2180 }
2181 }
2182
2183 /* Is it there? */
2184 if (!Idte.Gate.u1Present)
2185 {
2186 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2187 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2188 }
2189
2190 /* A null CS is bad. */
2191 RTSEL NewCS = Idte.Gate.u16Sel;
2192 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2193 {
2194 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2195 return iemRaiseGeneralProtectionFault0(pIemCpu);
2196 }
2197
2198 /* Fetch the descriptor for the new CS. */
2199 IEMSELDESC DescCS;
2200 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2201 if (rcStrict != VINF_SUCCESS)
2202 {
2203 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2204 return rcStrict;
2205 }
2206
2207 /* Must be a code segment. */
2208 if (!DescCS.Legacy.Gen.u1DescType)
2209 {
2210 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2211 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2212 }
2213 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2214 {
2215 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2216 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2217 }
2218
2219 /* Don't allow lowering the privilege level. */
2220 /** @todo Does the lowering of privileges apply to software interrupts
2221 * only? This has bearings on the more-privileged or
2222 * same-privilege stack behavior further down. A testcase would
2223 * be nice. */
2224 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2225 {
2226 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2227 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2228 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2229 }
2230
2231 /* Make sure the selector is present. */
2232 if (!DescCS.Legacy.Gen.u1Present)
2233 {
2234 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2235 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2236 }
2237
2238 /* Check the new EIP against the new CS limit. */
2239 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2240 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2241 ? Idte.Gate.u16OffsetLow
2242 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2243 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2244 if (uNewEip > cbLimitCS)
2245 {
2246 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
2247 u8Vector, uNewEip, cbLimitCS, NewCS));
2248 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2249 }
2250
2251 /*
2252 * If the privilege level changes, we need to get a new stack from the TSS.
2253 * This in turns means validating the new SS and ESP...
2254 */
2255 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2256 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2257 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2258 if (uNewCpl != pIemCpu->uCpl)
2259 {
2260 RTSEL NewSS;
2261 uint32_t uNewEsp;
2262 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2263 if (rcStrict != VINF_SUCCESS)
2264 return rcStrict;
2265
2266 IEMSELDESC DescSS;
2267 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2268 if (rcStrict != VINF_SUCCESS)
2269 return rcStrict;
2270
2271 /* Check that there is sufficient space for the stack frame. */
2272 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2273 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2274 {
2275 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2276 }
2277
2278 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2279 if ( uNewEsp - 1 > cbLimitSS
2280 || uNewEsp < cbStackFrame)
2281 {
2282 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2283 u8Vector, NewSS, uNewEsp, cbStackFrame));
2284 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2285 }
2286
2287 /*
2288 * Start making changes.
2289 */
2290
2291 /* Create the stack frame. */
2292 RTPTRUNION uStackFrame;
2293 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2294 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2295 if (rcStrict != VINF_SUCCESS)
2296 return rcStrict;
2297 void * const pvStackFrame = uStackFrame.pv;
2298
2299 if (fFlags & IEM_XCPT_FLAGS_ERR)
2300 *uStackFrame.pu32++ = uErr;
2301 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
2302 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2303 uStackFrame.pu32[2] = fEfl;
2304 uStackFrame.pu32[3] = pCtx->esp;
2305 uStackFrame.pu32[4] = pCtx->ss.Sel;
2306 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2307 if (rcStrict != VINF_SUCCESS)
2308 return rcStrict;
2309
2310 /* Mark the selectors 'accessed' (hope this is the correct time). */
2311 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2312 * after pushing the stack frame? (Write protect the gdt + stack to
2313 * find out.) */
2314 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2315 {
2316 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2317 if (rcStrict != VINF_SUCCESS)
2318 return rcStrict;
2319 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2320 }
2321
2322 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2323 {
2324 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2325 if (rcStrict != VINF_SUCCESS)
2326 return rcStrict;
2327 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2328 }
2329
2330 /*
2331 * Start committing the register changes (joins with the DPL=CPL branch).
2332 */
2333 pCtx->ss.Sel = NewSS;
2334 pCtx->ss.ValidSel = NewSS;
2335 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2336 pCtx->ss.u32Limit = cbLimitSS;
2337 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2338 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2339 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2340 pIemCpu->uCpl = uNewCpl;
2341 }
2342 /*
2343 * Same privilege, no stack change and smaller stack frame.
2344 */
2345 else
2346 {
2347 uint64_t uNewRsp;
2348 RTPTRUNION uStackFrame;
2349 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2350 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2351 if (rcStrict != VINF_SUCCESS)
2352 return rcStrict;
2353 void * const pvStackFrame = uStackFrame.pv;
2354
2355 if (fFlags & IEM_XCPT_FLAGS_ERR)
2356 *uStackFrame.pu32++ = uErr;
2357 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2358 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2359 uStackFrame.pu32[2] = fEfl;
2360 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2361 if (rcStrict != VINF_SUCCESS)
2362 return rcStrict;
2363
2364 /* Mark the CS selector as 'accessed'. */
2365 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2366 {
2367 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2368 if (rcStrict != VINF_SUCCESS)
2369 return rcStrict;
2370 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2371 }
2372
2373 /*
2374 * Start committing the register changes (joins with the other branch).
2375 */
2376 pCtx->rsp = uNewRsp;
2377 }
2378
2379 /* ... register committing continues. */
2380 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2381 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2382 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2383 pCtx->cs.u32Limit = cbLimitCS;
2384 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2385 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2386
2387 pCtx->rip = uNewEip;
2388 fEfl &= ~fEflToClear;
2389 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2390
2391 if (fFlags & IEM_XCPT_FLAGS_CR2)
2392 pCtx->cr2 = uCr2;
2393
2394 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2395 iemRaiseXcptAdjustState(pCtx, u8Vector);
2396
2397 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2398}
2399
2400
2401/**
2402 * Implements exceptions and interrupts for V8086 mode.
2403 *
2404 * @returns VBox strict status code.
2405 * @param pIemCpu The IEM per CPU instance data.
2406 * @param pCtx The CPU context.
2407 * @param cbInstr The number of bytes to offset rIP by in the return
2408 * address.
2409 * @param u8Vector The interrupt / exception vector number.
2410 * @param fFlags The flags.
2411 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2412 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2413 */
2414static VBOXSTRICTRC
2415iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2416 PCPUMCTX pCtx,
2417 uint8_t cbInstr,
2418 uint8_t u8Vector,
2419 uint32_t fFlags,
2420 uint16_t uErr,
2421 uint64_t uCr2)
2422{
2423 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2424 /** @todo implement me. */
2425 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2426}
2427
2428
2429/**
2430 * Implements exceptions and interrupts for long mode.
2431 *
2432 * @returns VBox strict status code.
2433 * @param pIemCpu The IEM per CPU instance data.
2434 * @param pCtx The CPU context.
2435 * @param cbInstr The number of bytes to offset rIP by in the return
2436 * address.
2437 * @param u8Vector The interrupt / exception vector number.
2438 * @param fFlags The flags.
2439 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2440 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2441 */
2442static VBOXSTRICTRC
2443iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2444 PCPUMCTX pCtx,
2445 uint8_t cbInstr,
2446 uint8_t u8Vector,
2447 uint32_t fFlags,
2448 uint16_t uErr,
2449 uint64_t uCr2)
2450{
2451 NOREF(cbInstr);
2452
2453 /*
2454 * Read the IDT entry.
2455 */
2456 uint16_t offIdt = (uint16_t)u8Vector << 4;
2457 if (pCtx->idtr.cbIdt < offIdt + 7)
2458 {
2459 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2460 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2461 }
2462 X86DESC64 Idte;
2463 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
2464 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2465 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
2466 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2467 return rcStrict;
2468 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2469 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2470 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2471
2472 /*
2473 * Check the descriptor type, DPL and such.
2474 * ASSUMES this is done in the same order as described for call-gate calls.
2475 */
2476 if (Idte.Gate.u1DescType)
2477 {
2478 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2479 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2480 }
2481 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2482 switch (Idte.Gate.u4Type)
2483 {
2484 case AMD64_SEL_TYPE_SYS_INT_GATE:
2485 fEflToClear |= X86_EFL_IF;
2486 break;
2487 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2488 break;
2489
2490 default:
2491 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2492 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2493 }
2494
2495 /* Check DPL against CPL if applicable. */
2496 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2497 {
2498 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2499 {
2500 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2501 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2502 }
2503 }
2504
2505 /* Is it there? */
2506 if (!Idte.Gate.u1Present)
2507 {
2508 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2509 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2510 }
2511
2512 /* A null CS is bad. */
2513 RTSEL NewCS = Idte.Gate.u16Sel;
2514 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2515 {
2516 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2517 return iemRaiseGeneralProtectionFault0(pIemCpu);
2518 }
2519
2520 /* Fetch the descriptor for the new CS. */
2521 IEMSELDESC DescCS;
2522 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2523 if (rcStrict != VINF_SUCCESS)
2524 {
2525 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2526 return rcStrict;
2527 }
2528
2529 /* Must be a 64-bit code segment. */
2530 if (!DescCS.Long.Gen.u1DescType)
2531 {
2532 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2533 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2534 }
2535 if ( !DescCS.Long.Gen.u1Long
2536 || DescCS.Long.Gen.u1DefBig
2537 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2538 {
2539 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2540 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2541 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2542 }
2543
2544 /* Don't allow lowering the privilege level. For non-conforming CS
2545 selectors, the CS.DPL sets the privilege level the trap/interrupt
2546 handler runs at. For conforming CS selectors, the CPL remains
2547 unchanged, but the CS.DPL must be <= CPL. */
2548 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2549 * when CPU in Ring-0. Result \#GP? */
2550 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2551 {
2552 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2553 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2554 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2555 }
2556
2557
2558 /* Make sure the selector is present. */
2559 if (!DescCS.Legacy.Gen.u1Present)
2560 {
2561 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2562 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2563 }
2564
2565 /* Check that the new RIP is canonical. */
2566 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2567 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2568 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2569 if (!IEM_IS_CANONICAL(uNewRip))
2570 {
2571 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2572 return iemRaiseGeneralProtectionFault0(pIemCpu);
2573 }
2574
2575 /*
2576 * If the privilege level changes or if the IST isn't zero, we need to get
2577 * a new stack from the TSS.
2578 */
2579 uint64_t uNewRsp;
2580 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2581 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2582 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2583 if ( uNewCpl != pIemCpu->uCpl
2584 || Idte.Gate.u3IST != 0)
2585 {
2586 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2587 if (rcStrict != VINF_SUCCESS)
2588 return rcStrict;
2589 }
2590 else
2591 uNewRsp = pCtx->rsp;
2592 uNewRsp &= ~(uint64_t)0xf;
2593
2594 /*
2595 * Start making changes.
2596 */
2597
2598 /* Create the stack frame. */
2599 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
2600 RTPTRUNION uStackFrame;
2601 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2602 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2603 if (rcStrict != VINF_SUCCESS)
2604 return rcStrict;
2605 void * const pvStackFrame = uStackFrame.pv;
2606
2607 if (fFlags & IEM_XCPT_FLAGS_ERR)
2608 *uStackFrame.pu64++ = uErr;
2609 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
2610 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
2611 uStackFrame.pu64[2] = fEfl;
2612 uStackFrame.pu64[3] = pCtx->rsp;
2613 uStackFrame.pu64[4] = pCtx->ss.Sel;
2614 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2615 if (rcStrict != VINF_SUCCESS)
2616 return rcStrict;
2617
2618 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
2619 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2620 * after pushing the stack frame? (Write protect the gdt + stack to
2621 * find out.) */
2622 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2623 {
2624 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2625 if (rcStrict != VINF_SUCCESS)
2626 return rcStrict;
2627 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2628 }
2629
2630 /*
2631 * Start committing the register changes.
2632 */
2633 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2634 * hidden registers when interrupting 32-bit or 16-bit code! */
2635 if (uNewCpl != pIemCpu->uCpl)
2636 {
2637 pCtx->ss.Sel = 0 | uNewCpl;
2638 pCtx->ss.ValidSel = 0 | uNewCpl;
2639 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2640 pCtx->ss.u32Limit = UINT32_MAX;
2641 pCtx->ss.u64Base = 0;
2642 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2643 }
2644 pCtx->rsp = uNewRsp - cbStackFrame;
2645 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2646 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2647 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2648 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2649 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2650 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2651 pCtx->rip = uNewRip;
2652 pIemCpu->uCpl = uNewCpl;
2653
2654 fEfl &= ~fEflToClear;
2655 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2656
2657 if (fFlags & IEM_XCPT_FLAGS_CR2)
2658 pCtx->cr2 = uCr2;
2659
2660 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2661 iemRaiseXcptAdjustState(pCtx, u8Vector);
2662
2663 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2664}
2665
2666
2667/**
2668 * Implements exceptions and interrupts.
2669 *
2670 * All exceptions and interrupts go through this function!
2671 *
2672 * @returns VBox strict status code.
2673 * @param pIemCpu The IEM per CPU instance data.
2674 * @param cbInstr The number of bytes to offset rIP by in the return
2675 * address.
2676 * @param u8Vector The interrupt / exception vector number.
2677 * @param fFlags The flags.
2678 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2679 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2680 */
2681DECL_NO_INLINE(static, VBOXSTRICTRC)
2682iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2683 uint8_t cbInstr,
2684 uint8_t u8Vector,
2685 uint32_t fFlags,
2686 uint16_t uErr,
2687 uint64_t uCr2)
2688{
2689 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2690
2691 /*
2692 * Do recursion accounting.
2693 */
2694 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2695 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2696 if (pIemCpu->cXcptRecursions == 0)
2697 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2698 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2699 else
2700 {
2701 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2702 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2703
2704 /** @todo double and triple faults. */
2705 if (pIemCpu->cXcptRecursions >= 3)
2706 {
2707#ifdef DEBUG_bird
2708 AssertFailed();
2709#endif
2710 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2711 }
2712
2713 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2714 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2715 {
2716 ....
2717 } */
2718 }
2719 pIemCpu->cXcptRecursions++;
2720 pIemCpu->uCurXcpt = u8Vector;
2721 pIemCpu->fCurXcpt = fFlags;
2722
2723 /*
2724 * Extensive logging.
2725 */
2726#if defined(LOG_ENABLED) && defined(IN_RING3)
2727 if (LogIs3Enabled())
2728 {
2729 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2730 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2731 char szRegs[4096];
2732 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2733 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2734 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2735 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2736 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2737 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2738 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2739 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2740 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2741 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2742 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2743 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2744 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2745 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2746 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2747 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2748 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2749 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2750 " efer=%016VR{efer}\n"
2751 " pat=%016VR{pat}\n"
2752 " sf_mask=%016VR{sf_mask}\n"
2753 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2754 " lstar=%016VR{lstar}\n"
2755 " star=%016VR{star} cstar=%016VR{cstar}\n"
2756 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2757 );
2758
2759 char szInstr[256];
2760 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2761 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2762 szInstr, sizeof(szInstr), NULL);
2763 Log3(("%s%s\n", szRegs, szInstr));
2764 }
2765#endif /* LOG_ENABLED */
2766
2767 /*
2768 * Call the mode specific worker function.
2769 */
2770 VBOXSTRICTRC rcStrict;
2771 if (!(pCtx->cr0 & X86_CR0_PE))
2772 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2773 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2774 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2775 else if (!pCtx->eflags.Bits.u1VM)
2776 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2777 else
2778 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2779
2780 /*
2781 * Unwind.
2782 */
2783 pIemCpu->cXcptRecursions--;
2784 pIemCpu->uCurXcpt = uPrevXcpt;
2785 pIemCpu->fCurXcpt = fPrevXcpt;
2786 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2787 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2788 return rcStrict;
2789}
2790
2791
2792/** \#DE - 00. */
2793DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2794{
2795 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2796}
2797
2798
2799/** \#DB - 01. */
2800DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2801{
2802 /** @todo set/clear RF. */
2803 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2804}
2805
2806
2807/** \#UD - 06. */
2808DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2809{
2810 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2811}
2812
2813
2814/** \#NM - 07. */
2815DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2816{
2817 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2818}
2819
2820
2821#ifdef SOME_UNUSED_FUNCTION
2822/** \#TS(err) - 0a. */
2823DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2824{
2825 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2826}
2827#endif
2828
2829
2830/** \#TS(tr) - 0a. */
2831DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2832{
2833 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2834 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2835}
2836
2837
2838/** \#NP(err) - 0b. */
2839DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2840{
2841 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2842}
2843
2844
2845/** \#NP(seg) - 0b. */
2846DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2847{
2848 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2849 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2850}
2851
2852
2853/** \#NP(sel) - 0b. */
2854DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2855{
2856 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2857 uSel & ~X86_SEL_RPL, 0);
2858}
2859
2860
2861 /** \#SS(sel) - 0c. */
2862DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2863{
2864 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2865 uSel & ~X86_SEL_RPL, 0);
2866}
2867
2868
2869/** \#GP(n) - 0d. */
2870DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2871{
2872 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2873}
2874
2875
2876/** \#GP(0) - 0d. */
2877DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2878{
2879 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2880}
2881
2882
2883/** \#GP(sel) - 0d. */
2884DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2885{
2886 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2887 Sel & ~X86_SEL_RPL, 0);
2888}
2889
2890
2891/** \#GP(0) - 0d. */
2892DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2893{
2894 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2895}
2896
2897
2898/** \#GP(sel) - 0d. */
2899DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2900{
2901 NOREF(iSegReg); NOREF(fAccess);
2902 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2903 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2904}
2905
2906
2907/** \#GP(sel) - 0d. */
2908DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2909{
2910 NOREF(Sel);
2911 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2912}
2913
2914
2915/** \#GP(sel) - 0d. */
2916DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2917{
2918 NOREF(iSegReg); NOREF(fAccess);
2919 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2920}
2921
2922
2923/** \#PF(n) - 0e. */
2924DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2925{
2926 uint16_t uErr;
2927 switch (rc)
2928 {
2929 case VERR_PAGE_NOT_PRESENT:
2930 case VERR_PAGE_TABLE_NOT_PRESENT:
2931 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2932 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2933 uErr = 0;
2934 break;
2935
2936 default:
2937 AssertMsgFailed(("%Rrc\n", rc));
2938 case VERR_ACCESS_DENIED:
2939 uErr = X86_TRAP_PF_P;
2940 break;
2941
2942 /** @todo reserved */
2943 }
2944
2945 if (pIemCpu->uCpl == 3)
2946 uErr |= X86_TRAP_PF_US;
2947
2948 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2949 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2950 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2951 uErr |= X86_TRAP_PF_ID;
2952
2953 /* Note! RW access callers reporting a WRITE protection fault, will clear
2954 the READ flag before calling. So, read-modify-write accesses (RW)
2955 can safely be reported as READ faults. */
2956 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2957 uErr |= X86_TRAP_PF_RW;
2958
2959 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2960 uErr, GCPtrWhere);
2961}
2962
2963
2964/** \#MF(0) - 10. */
2965DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2966{
2967 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2968}
2969
2970
2971/** \#AC(0) - 11. */
2972DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2973{
2974 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2975}
2976
2977
2978/**
2979 * Macro for calling iemCImplRaiseDivideError().
2980 *
2981 * This enables us to add/remove arguments and force different levels of
2982 * inlining as we wish.
2983 *
2984 * @return Strict VBox status code.
2985 */
2986#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2987IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2988{
2989 NOREF(cbInstr);
2990 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2991}
2992
2993
2994/**
2995 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2996 *
2997 * This enables us to add/remove arguments and force different levels of
2998 * inlining as we wish.
2999 *
3000 * @return Strict VBox status code.
3001 */
3002#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
3003IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3004{
3005 NOREF(cbInstr);
3006 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3007}
3008
3009
3010/**
3011 * Macro for calling iemCImplRaiseInvalidOpcode().
3012 *
3013 * This enables us to add/remove arguments and force different levels of
3014 * inlining as we wish.
3015 *
3016 * @return Strict VBox status code.
3017 */
3018#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
3019IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3020{
3021 NOREF(cbInstr);
3022 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3023}
3024
3025
3026/** @} */
3027
3028
3029/*
3030 *
3031 * Helper routines.
3032 * Helper routines.
3033 * Helper routines.
3034 *
3035 */
3036
3037/**
3038 * Recalculates the effective operand size.
3039 *
3040 * @param pIemCpu The IEM state.
3041 */
3042static void iemRecalEffOpSize(PIEMCPU pIemCpu)
3043{
3044 switch (pIemCpu->enmCpuMode)
3045 {
3046 case IEMMODE_16BIT:
3047 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
3048 break;
3049 case IEMMODE_32BIT:
3050 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
3051 break;
3052 case IEMMODE_64BIT:
3053 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
3054 {
3055 case 0:
3056 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
3057 break;
3058 case IEM_OP_PRF_SIZE_OP:
3059 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3060 break;
3061 case IEM_OP_PRF_SIZE_REX_W:
3062 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
3063 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3064 break;
3065 }
3066 break;
3067 default:
3068 AssertFailed();
3069 }
3070}
3071
3072
3073/**
3074 * Sets the default operand size to 64-bit and recalculates the effective
3075 * operand size.
3076 *
3077 * @param pIemCpu The IEM state.
3078 */
3079static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
3080{
3081 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3082 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
3083 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
3084 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3085 else
3086 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3087}
3088
3089
3090/*
3091 *
3092 * Common opcode decoders.
3093 * Common opcode decoders.
3094 * Common opcode decoders.
3095 *
3096 */
3097//#include <iprt/mem.h>
3098
3099/**
3100 * Used to add extra details about a stub case.
3101 * @param pIemCpu The IEM per CPU state.
3102 */
3103static void iemOpStubMsg2(PIEMCPU pIemCpu)
3104{
3105#if defined(LOG_ENABLED) && defined(IN_RING3)
3106 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3107 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3108 char szRegs[4096];
3109 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3110 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3111 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3112 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3113 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3114 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3115 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3116 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3117 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3118 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3119 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3120 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3121 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3122 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3123 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3124 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3125 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3126 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3127 " efer=%016VR{efer}\n"
3128 " pat=%016VR{pat}\n"
3129 " sf_mask=%016VR{sf_mask}\n"
3130 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3131 " lstar=%016VR{lstar}\n"
3132 " star=%016VR{star} cstar=%016VR{cstar}\n"
3133 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3134 );
3135
3136 char szInstr[256];
3137 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3138 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3139 szInstr, sizeof(szInstr), NULL);
3140
3141 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
3142#else
3143 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
3144#endif
3145}
3146
3147/**
3148 * Complains about a stub.
3149 *
3150 * Two versions of this macro are provided: one for daily use and one for use
3151 * when working on IEM.
3152 */
3153#if 0
3154# define IEMOP_BITCH_ABOUT_STUB() \
3155 do { \
3156 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
3157 iemOpStubMsg2(pIemCpu); \
3158 RTAssertPanic(); \
3159 } while (0)
3160#else
3161# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
3162#endif
3163
3164/** Stubs an opcode. */
3165#define FNIEMOP_STUB(a_Name) \
3166 FNIEMOP_DEF(a_Name) \
3167 { \
3168 IEMOP_BITCH_ABOUT_STUB(); \
3169 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3170 } \
3171 typedef int ignore_semicolon
3172
3173/** Stubs an opcode. */
3174#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
3175 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3176 { \
3177 IEMOP_BITCH_ABOUT_STUB(); \
3178 NOREF(a_Name0); \
3179 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3180 } \
3181 typedef int ignore_semicolon
3182
3183/** Stubs an opcode which currently should raise \#UD. */
3184#define FNIEMOP_UD_STUB(a_Name) \
3185 FNIEMOP_DEF(a_Name) \
3186 { \
3187 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3188 return IEMOP_RAISE_INVALID_OPCODE(); \
3189 } \
3190 typedef int ignore_semicolon
3191
3192/** Stubs an opcode which currently should raise \#UD. */
3193#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
3194 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3195 { \
3196 NOREF(a_Name0); \
3197 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3198 return IEMOP_RAISE_INVALID_OPCODE(); \
3199 } \
3200 typedef int ignore_semicolon
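/* Usage sketch (illustrative; the opcode names are hypothetical):
 *
 *      FNIEMOP_STUB(iemOp_Grp15_example);      // logs/asserts when hit
 *      FNIEMOP_UD_STUB(iemOp_example_invalid); // raises #UD when hit
 */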
3201
3202
3203
3204/** @name Register Access.
3205 * @{
3206 */
3207
3208/**
3209 * Gets a reference (pointer) to the specified hidden segment register.
3210 *
3211 * @returns Hidden register reference.
3212 * @param pIemCpu The per CPU data.
3213 * @param iSegReg The segment register.
3214 */
3215static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
3216{
3217 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3218 PCPUMSELREG pSReg;
3219 switch (iSegReg)
3220 {
3221 case X86_SREG_ES: pSReg = &pCtx->es; break;
3222 case X86_SREG_CS: pSReg = &pCtx->cs; break;
3223 case X86_SREG_SS: pSReg = &pCtx->ss; break;
3224 case X86_SREG_DS: pSReg = &pCtx->ds; break;
3225 case X86_SREG_FS: pSReg = &pCtx->fs; break;
3226 case X86_SREG_GS: pSReg = &pCtx->gs; break;
3227 default:
3228 AssertFailedReturn(NULL);
3229 }
3230#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3231 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
3232 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
3233#else
3234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
3235#endif
3236 return pSReg;
3237}
3238
3239
3240/**
3241 * Gets a reference (pointer) to the specified segment register (the selector
3242 * value).
3243 *
3244 * @returns Pointer to the selector variable.
3245 * @param pIemCpu The per CPU data.
3246 * @param iSegReg The segment register.
3247 */
3248static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
3249{
3250 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3251 switch (iSegReg)
3252 {
3253 case X86_SREG_ES: return &pCtx->es.Sel;
3254 case X86_SREG_CS: return &pCtx->cs.Sel;
3255 case X86_SREG_SS: return &pCtx->ss.Sel;
3256 case X86_SREG_DS: return &pCtx->ds.Sel;
3257 case X86_SREG_FS: return &pCtx->fs.Sel;
3258 case X86_SREG_GS: return &pCtx->gs.Sel;
3259 }
3260 AssertFailedReturn(NULL);
3261}
3262
3263
3264/**
3265 * Fetches the selector value of a segment register.
3266 *
3267 * @returns The selector value.
3268 * @param pIemCpu The per CPU data.
3269 * @param iSegReg The segment register.
3270 */
3271static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3272{
3273 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3274 switch (iSegReg)
3275 {
3276 case X86_SREG_ES: return pCtx->es.Sel;
3277 case X86_SREG_CS: return pCtx->cs.Sel;
3278 case X86_SREG_SS: return pCtx->ss.Sel;
3279 case X86_SREG_DS: return pCtx->ds.Sel;
3280 case X86_SREG_FS: return pCtx->fs.Sel;
3281 case X86_SREG_GS: return pCtx->gs.Sel;
3282 }
3283 AssertFailedReturn(0xffff);
3284}
3285
3286
3287/**
3288 * Gets a reference (pointer) to the specified general register.
3289 *
3290 * @returns Register reference.
3291 * @param pIemCpu The per CPU data.
3292 * @param iReg The general register.
3293 */
3294static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3295{
3296 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3297 switch (iReg)
3298 {
3299 case X86_GREG_xAX: return &pCtx->rax;
3300 case X86_GREG_xCX: return &pCtx->rcx;
3301 case X86_GREG_xDX: return &pCtx->rdx;
3302 case X86_GREG_xBX: return &pCtx->rbx;
3303 case X86_GREG_xSP: return &pCtx->rsp;
3304 case X86_GREG_xBP: return &pCtx->rbp;
3305 case X86_GREG_xSI: return &pCtx->rsi;
3306 case X86_GREG_xDI: return &pCtx->rdi;
3307 case X86_GREG_x8: return &pCtx->r8;
3308 case X86_GREG_x9: return &pCtx->r9;
3309 case X86_GREG_x10: return &pCtx->r10;
3310 case X86_GREG_x11: return &pCtx->r11;
3311 case X86_GREG_x12: return &pCtx->r12;
3312 case X86_GREG_x13: return &pCtx->r13;
3313 case X86_GREG_x14: return &pCtx->r14;
3314 case X86_GREG_x15: return &pCtx->r15;
3315 }
3316 AssertFailedReturn(NULL);
3317}
3318
3319
3320/**
3321 * Gets a reference (pointer) to the specified 8-bit general register.
3322 *
3323 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3324 *
3325 * @returns Register reference.
3326 * @param pIemCpu The per CPU data.
3327 * @param iReg The register.
3328 */
3329static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3330{
3331 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3332 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3333
3334 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3335 if (iReg >= 4)
3336 pu8Reg++;
3337 return pu8Reg;
3338}
3339
3340
3341/**
3342 * Fetches the value of an 8-bit general register.
3343 *
3344 * @returns The register value.
3345 * @param pIemCpu The per CPU data.
3346 * @param iReg The register.
3347 */
3348static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3349{
3350 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3351 return *pbSrc;
3352}
3353
3354
3355/**
3356 * Fetches the value of a 16-bit general register.
3357 *
3358 * @returns The register value.
3359 * @param pIemCpu The per CPU data.
3360 * @param iReg The register.
3361 */
3362static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3363{
3364 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3365}
3366
3367
3368/**
3369 * Fetches the value of a 32-bit general register.
3370 *
3371 * @returns The register value.
3372 * @param pIemCpu The per CPU data.
3373 * @param iReg The register.
3374 */
3375static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3376{
3377 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3378}
3379
3380
3381/**
3382 * Fetches the value of a 64-bit general register.
3383 *
3384 * @returns The register value.
3385 * @param pIemCpu The per CPU data.
3386 * @param iReg The register.
3387 */
3388static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3389{
3390 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3391}
3392
3393
3394/**
3395 * Checks whether the FPU state is in FXSAVE format or not.
3396 *
3397 * @returns true if it is, false if it's in FNSAVE format.
3398 * @param pIemCpu The IEM per CPU data.
3399 */
3400DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3401{
3402#ifdef RT_ARCH_AMD64
3403 NOREF(pIemCpu);
3404 return true;
3405#else
3406 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3407 return true;
3408#endif
3409}
3410
3411
3412/**
3413 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3414 *
3415 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3416 * segment limit.
3417 *
3418 * @param pIemCpu The per CPU data.
3419 * @param offNextInstr The offset of the next instruction.
3420 */
3421static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3422{
3423 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3424 switch (pIemCpu->enmEffOpSize)
3425 {
3426 case IEMMODE_16BIT:
3427 {
3428 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3429 if ( uNewIp > pCtx->cs.u32Limit
3430 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3431 return iemRaiseGeneralProtectionFault0(pIemCpu);
3432 pCtx->rip = uNewIp;
3433 break;
3434 }
3435
3436 case IEMMODE_32BIT:
3437 {
3438 Assert(pCtx->rip <= UINT32_MAX);
3439 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3440
3441 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3442 if (uNewEip > pCtx->cs.u32Limit)
3443 return iemRaiseGeneralProtectionFault0(pIemCpu);
3444 pCtx->rip = uNewEip;
3445 break;
3446 }
3447
3448 case IEMMODE_64BIT:
3449 {
3450 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3451
3452 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3453 if (!IEM_IS_CANONICAL(uNewRip))
3454 return iemRaiseGeneralProtectionFault0(pIemCpu);
3455 pCtx->rip = uNewRip;
3456 break;
3457 }
3458
3459 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3460 }
3461
3462 return VINF_SUCCESS;
3463}
3464
3465
3466/**
3467 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3468 *
3469 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3470 * segment limit.
3471 *
3472 * @returns Strict VBox status code.
3473 * @param pIemCpu The per CPU data.
3474 * @param offNextInstr The offset of the next instruction.
3475 */
3476static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3477{
3478 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3479 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3480
3481 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3482 if ( uNewIp > pCtx->cs.u32Limit
3483 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3484 return iemRaiseGeneralProtectionFault0(pIemCpu);
3485 /** @todo Test 16-bit jump in 64-bit mode. */
3486 pCtx->rip = uNewIp;
3487
3488 return VINF_SUCCESS;
3489}
3490
3491
3492/**
3493 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3494 *
3495 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3496 * segment limit.
3497 *
3498 * @returns Strict VBox status code.
3499 * @param pIemCpu The per CPU data.
3500 * @param offNextInstr The offset of the next instruction.
3501 */
3502static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3503{
3504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3505 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3506
3507 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3508 {
3509 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3510
3511 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3512 if (uNewEip > pCtx->cs.u32Limit)
3513 return iemRaiseGeneralProtectionFault0(pIemCpu);
3514 pCtx->rip = uNewEip;
3515 }
3516 else
3517 {
3518 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3519
3520 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3521 if (!IEM_IS_CANONICAL(uNewRip))
3522 return iemRaiseGeneralProtectionFault0(pIemCpu);
3523 pCtx->rip = uNewRip;
3524 }
3525 return VINF_SUCCESS;
3526}
3527
3528
3529/**
3530 * Performs a near jump to the specified address.
3531 *
3532 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3533 * segment limit.
3534 *
3535 * @param pIemCpu The per CPU data.
3536 * @param uNewRip The new RIP value.
3537 */
3538static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3539{
3540 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3541 switch (pIemCpu->enmEffOpSize)
3542 {
3543 case IEMMODE_16BIT:
3544 {
3545 Assert(uNewRip <= UINT16_MAX);
3546 if ( uNewRip > pCtx->cs.u32Limit
3547 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3548 return iemRaiseGeneralProtectionFault0(pIemCpu);
3549 /** @todo Test 16-bit jump in 64-bit mode. */
3550 pCtx->rip = uNewRip;
3551 break;
3552 }
3553
3554 case IEMMODE_32BIT:
3555 {
3556 Assert(uNewRip <= UINT32_MAX);
3557 Assert(pCtx->rip <= UINT32_MAX);
3558 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3559
3560 if (uNewRip > pCtx->cs.u32Limit)
3561 return iemRaiseGeneralProtectionFault0(pIemCpu);
3562 pCtx->rip = uNewRip;
3563 break;
3564 }
3565
3566 case IEMMODE_64BIT:
3567 {
3568 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3569
3570 if (!IEM_IS_CANONICAL(uNewRip))
3571 return iemRaiseGeneralProtectionFault0(pIemCpu);
3572 pCtx->rip = uNewRip;
3573 break;
3574 }
3575
3576 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3577 }
3578
3579 return VINF_SUCCESS;
3580}
3581
3582
3583/**
3584 * Gets the address of the top of the stack.
3585 *
3586 * @param pCtx The CPU context from which SP/ESP/RSP should be
3587 * read.
3588 */
3589DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3590{
3591 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3592 return pCtx->rsp;
3593 if (pCtx->ss.Attr.n.u1DefBig)
3594 return pCtx->esp;
3595 return pCtx->sp;
3596}
3597
3598
3599/**
3600 * Updates the RIP/EIP/IP to point to the next instruction.
3601 *
3602 * @param pIemCpu The per CPU data.
3603 * @param cbInstr The number of bytes to add.
3604 */
3605static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3606{
3607 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3608 switch (pIemCpu->enmCpuMode)
3609 {
3610 case IEMMODE_16BIT:
3611 Assert(pCtx->rip <= UINT16_MAX);
3612 pCtx->eip += cbInstr;
3613 pCtx->eip &= UINT32_C(0xffff);
3614 break;
3615
3616 case IEMMODE_32BIT:
3617 pCtx->eip += cbInstr;
3618 Assert(pCtx->rip <= UINT32_MAX);
3619 break;
3620
3621 case IEMMODE_64BIT:
3622 pCtx->rip += cbInstr;
3623 break;
3624 default: AssertFailed();
3625 }
3626}
3627
3628
3629/**
3630 * Updates the RIP/EIP/IP to point to the next instruction.
3631 *
3632 * @param pIemCpu The per CPU data.
3633 */
3634static void iemRegUpdateRip(PIEMCPU pIemCpu)
3635{
3636 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3637}
3638
3639
3640/**
3641 * Adds to the stack pointer.
3642 *
3643 * @param pCtx The CPU context in which SP/ESP/RSP should be
3644 * updated.
3645 * @param cbToAdd The number of bytes to add.
3646 */
3647DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3648{
3649 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3650 pCtx->rsp += cbToAdd;
3651 else if (pCtx->ss.Attr.n.u1DefBig)
3652 pCtx->esp += cbToAdd;
3653 else
3654 pCtx->sp += cbToAdd;
3655}
3656
3657
3658/**
3659 * Subtracts from the stack pointer.
3660 *
3661 * @param pCtx The CPU context in which SP/ESP/RSP should be
3662 * updated.
3663 * @param cbToSub The number of bytes to subtract.
3664 */
3665DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3666{
3667 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3668 pCtx->rsp -= cbToSub;
3669 else if (pCtx->ss.Attr.n.u1DefBig)
3670 pCtx->esp -= cbToSub;
3671 else
3672 pCtx->sp -= cbToSub;
3673}
3674
3675
3676/**
3677 * Adds to the temporary stack pointer.
3678 *
3679 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3680 * @param cbToAdd The number of bytes to add.
3681 * @param pCtx Where to get the current stack mode.
3682 */
3683DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx)
3684{
3685 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3686 pTmpRsp->u += cbToAdd;
3687 else if (pCtx->ss.Attr.n.u1DefBig)
3688 pTmpRsp->DWords.dw0 += cbToAdd;
3689 else
3690 pTmpRsp->Words.w0 += cbToAdd;
3691}
3692
3693
3694/**
3695 * Subtracts from the temporary stack pointer.
3696 *
3697 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3698 * @param cbToSub The number of bytes to subtract.
3699 * @param pCtx Where to get the current stack mode.
3700 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3701 * expecting that.
3702 */
3703DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx)
3704{
3705 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3706 pTmpRsp->u -= cbToSub;
3707 else if (pCtx->ss.Attr.n.u1DefBig)
3708 pTmpRsp->DWords.dw0 -= cbToSub;
3709 else
3710 pTmpRsp->Words.w0 -= cbToSub;
3711}
3712
3713
3714/**
3715 * Calculates the effective stack address for a push of the specified size as
3716 * well as the new RSP value (upper bits may be masked).
3717 *
3718 * @returns Effective stack address for the push.
3719 * @param pCtx Where to get the current stack mode.
3720 * @param cbItem The size of the stack item to push.
3721 * @param puNewRsp Where to return the new RSP value.
3722 */
3723DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3724{
3725 RTUINT64U uTmpRsp;
3726 RTGCPTR GCPtrTop;
3727 uTmpRsp.u = pCtx->rsp;
3728
3729 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3730 GCPtrTop = uTmpRsp.u -= cbItem;
3731 else if (pCtx->ss.Attr.n.u1DefBig)
3732 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3733 else
3734 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3735 *puNewRsp = uTmpRsp.u;
3736 return GCPtrTop;
3737}
3738
3739
3740/**
3741 * Gets the current stack pointer and calculates the value after a pop of the
3742 * specified size.
3743 *
3744 * @returns Current stack pointer.
3745 * @param pCtx Where to get the current stack mode.
3746 * @param cbItem The size of the stack item to pop.
3747 * @param puNewRsp Where to return the new RSP value.
3748 */
3749DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3750{
3751 RTUINT64U uTmpRsp;
3752 RTGCPTR GCPtrTop;
3753 uTmpRsp.u = pCtx->rsp;
3754
3755 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3756 {
3757 GCPtrTop = uTmpRsp.u;
3758 uTmpRsp.u += cbItem;
3759 }
3760 else if (pCtx->ss.Attr.n.u1DefBig)
3761 {
3762 GCPtrTop = uTmpRsp.DWords.dw0;
3763 uTmpRsp.DWords.dw0 += cbItem;
3764 }
3765 else
3766 {
3767 GCPtrTop = uTmpRsp.Words.w0;
3768 uTmpRsp.Words.w0 += cbItem;
3769 }
3770 *puNewRsp = uTmpRsp.u;
3771 return GCPtrTop;
3772}
3773
3774
3775/**
3776 * Calculates the effective stack address for a push of the specified size as
3777 * well as the new temporary RSP value (upper bits may be masked).
3778 *
3779 * @returns Effective stack address for the push.
3780 * @param pTmpRsp The temporary stack pointer. This is updated.
3781 * @param cbItem The size of the stack item to push.
3782 * @param pCtx Where to get the current stack mode.
3783 */
3784DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3785{
3786 RTGCPTR GCPtrTop;
3787
3788 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3789 GCPtrTop = pTmpRsp->u -= cbItem;
3790 else if (pCtx->ss.Attr.n.u1DefBig)
3791 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3792 else
3793 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3794 return GCPtrTop;
3795}
3796
3797
3798/**
3799 * Gets the effective stack address for a pop of the specified size and
3800 * calculates and updates the temporary RSP.
3801 *
3802 * @returns Current stack pointer.
3803 * @param pTmpRsp The temporary stack pointer. This is updated.
3804 * @param pCtx Where to get the current stack mode.
3805 * @param cbItem The size of the stack item to pop.
3806 */
3807DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3808{
3809 RTGCPTR GCPtrTop;
3810 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
3811 {
3812 GCPtrTop = pTmpRsp->u;
3813 pTmpRsp->u += cbItem;
3814 }
3815 else if (pCtx->ss.Attr.n.u1DefBig)
3816 {
3817 GCPtrTop = pTmpRsp->DWords.dw0;
3818 pTmpRsp->DWords.dw0 += cbItem;
3819 }
3820 else
3821 {
3822 GCPtrTop = pTmpRsp->Words.w0;
3823 pTmpRsp->Words.w0 += cbItem;
3824 }
3825 return GCPtrTop;
3826}
3827
3828
3829/**
3830 * Checks if an Intel CPUID feature bit is set.
3831 *
3832 * @returns true / false.
3833 *
3834 * @param pIemCpu The IEM per CPU data.
3835 * @param fEdx The EDX bit to test, or 0 if ECX.
3836 * @param fEcx The ECX bit to test, or 0 if EDX.
3837 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3838 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3839 */
3840static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3841{
3842 uint32_t uEax, uEbx, uEcx, uEdx;
3843 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3844 return (fEcx && (uEcx & fEcx))
3845 || (fEdx && (uEdx & fEdx));
3846}
3847
3848
3849/**
3850 * Checks if an AMD CPUID feature bit is set.
3851 *
3852 * @returns true / false.
3853 *
3854 * @param pIemCpu The IEM per CPU data.
3855 * @param fEdx The EDX bit to test, or 0 if ECX.
3856 * @param fEcx The ECX bit to test, or 0 if EDX.
3857 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3858 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3859 */
3860static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3861{
3862 uint32_t uEax, uEbx, uEcx, uEdx;
3863 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3864 return (fEcx && (uEcx & fEcx))
3865 || (fEdx && (uEdx & fEdx));
3866}
3867
3868/** @} */
3869
3870
3871/** @name FPU access and helpers.
3872 *
3873 * @{
3874 */
3875
3876
3877/**
3878 * Hook for preparing to use the host FPU.
3879 *
3880 * This is necessary in ring-0 and raw-mode context.
3881 *
3882 * @param pIemCpu The IEM per CPU data.
3883 */
3884DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3885{
3886#ifdef IN_RING3
3887 NOREF(pIemCpu);
3888#else
3889/** @todo RZ: FIXME */
3890//# error "Implement me"
3891#endif
3892}
3893
3894
3895/**
3896 * Stores a QNaN value into a FPU register.
3897 *
3898 * @param pReg Pointer to the register.
3899 */
3900DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3901{
3902 pReg->au32[0] = UINT32_C(0x00000000);
3903 pReg->au32[1] = UINT32_C(0xc0000000);
3904 pReg->au16[4] = UINT16_C(0xffff);
3905}
3906
3907
3908/**
3909 * Updates the FOP, FPU.CS and FPUIP registers.
3910 *
3911 * @param pIemCpu The IEM per CPU data.
3912 * @param pCtx The CPU context.
3913 */
3914DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3915{
3916 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3917 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3918 /** @todo FPU.CS and FPUIP need to be kept separately. */
3919 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3920 {
3921 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
3922 * are handled in real mode, based on the fnsave and fnstenv images. */
3923 pCtx->fpu.CS = 0;
3924 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3925 }
3926 else
3927 {
3928 pCtx->fpu.CS = pCtx->cs.Sel;
3929 pCtx->fpu.FPUIP = pCtx->rip;
3930 }
3931}
3932
3933
3934/**
3935 * Updates the FPU.DS and FPUDP registers.
3936 *
3937 * @param pIemCpu The IEM per CPU data.
3938 * @param pCtx The CPU context.
3939 * @param iEffSeg The effective segment register.
3940 * @param GCPtrEff The effective address relative to @a iEffSeg.
3941 */
3942DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3943{
3944 RTSEL sel;
3945 switch (iEffSeg)
3946 {
3947 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3948 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3949 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3950 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3951 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3952 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3953 default:
3954 AssertMsgFailed(("%d\n", iEffSeg));
3955 sel = pCtx->ds.Sel;
3956 }
3957 /** @todo FPU.DS and FPUDP need to be kept separately. */
3958 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3959 {
3960 pCtx->fpu.DS = 0;
3961 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3962 }
3963 else
3964 {
3965 pCtx->fpu.DS = sel;
3966 pCtx->fpu.FPUDP = GCPtrEff;
3967 }
3968}
3969
3970
3971/**
3972 * Rotates the stack registers in the push direction.
3973 *
3974 * @param pCtx The CPU context.
3975 * @remarks This is a complete waste of time, but fxsave stores the registers in
3976 * stack order.
3977 */
3978DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3979{
3980 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3981 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3982 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3983 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3984 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3985 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3986 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3987 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3988 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3989}
3990
3991
3992/**
3993 * Rotates the stack registers in the pop direction.
3994 *
3995 * @param pCtx The CPU context.
3996 * @remarks This is a complete waste of time, but fxsave stores the registers in
3997 * stack order.
3998 */
3999DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
4000{
4001 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
4002 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
4003 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
4004 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
4005 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
4006 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
4007 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
4008 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
4009 pCtx->fpu.aRegs[7].r80 = r80Tmp;
4010}
4011
4012
4013/**
4014 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4015 * exception prevents it.
4016 *
4017 * @param pIemCpu The IEM per CPU data.
4018 * @param pResult The FPU operation result to push.
4019 * @param pCtx The CPU context.
4020 */
4021static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
4022{
4023 /* Update FSW and bail if there are pending exceptions afterwards. */
4024 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4025 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4026 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4027 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4028 {
4029 pCtx->fpu.FSW = fFsw;
4030 return;
4031 }
4032
4033 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4034 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4035 {
4036 /* All is fine, push the actual value. */
4037 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4038 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
4039 }
4040 else if (pCtx->fpu.FCW & X86_FCW_IM)
4041 {
4042 /* Masked stack overflow, push QNaN. */
4043 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4044 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4045 }
4046 else
4047 {
4048 /* Raise stack overflow, don't push anything. */
4049 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4050 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4051 return;
4052 }
4053
4054 fFsw &= ~X86_FSW_TOP_MASK;
4055 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4056 pCtx->fpu.FSW = fFsw;
4057
4058 iemFpuRotateStackPush(pCtx);
4059}
4060
4061
4062/**
4063 * Stores a result in a FPU register and updates the FSW and FTW.
4064 *
4065 * @param pIemCpu The IEM per CPU data.
4066 * @param pResult The result to store.
4067 * @param iStReg Which FPU register to store it in.
4068 * @param pCtx The CPU context.
4069 */
4070static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
4071{
4072 Assert(iStReg < 8);
4073 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4074 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4075 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4076 pCtx->fpu.FTW |= RT_BIT(iReg);
4077 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
4078}
4079
4080
4081/**
4082 * Only updates the FPU status word (FSW) with the result of the current
4083 * instruction.
4084 *
4085 * @param pCtx The CPU context.
4086 * @param u16FSW The FSW output of the current instruction.
4087 */
4088static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
4089{
4090 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4091 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4092}
4093
4094
4095/**
4096 * Pops one item off the FPU stack if no pending exception prevents it.
4097 *
4098 * @param pCtx The CPU context.
4099 */
4100static void iemFpuMaybePopOne(PCPUMCTX pCtx)
4101{
4102 /* Check pending exceptions. */
4103 uint16_t uFSW = pCtx->fpu.FSW;
4104 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4105 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4106 return;
4107
4108 /* TOP--. */
4109 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4110 uFSW &= ~X86_FSW_TOP_MASK;
4111 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4112 pCtx->fpu.FSW = uFSW;
4113
4114 /* Mark the previous ST0 as empty. */
4115 iOldTop >>= X86_FSW_TOP_SHIFT;
4116 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
4117
4118 /* Rotate the registers. */
4119 iemFpuRotateStackPop(pCtx);
4120}
4121
4122
4123/**
4124 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4125 *
4126 * @param pIemCpu The IEM per CPU data.
4127 * @param pResult The FPU operation result to push.
4128 */
4129static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
4130{
4131 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4132 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4133 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4134}
4135
4136
4137/**
4138 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4139 * and sets FPUDP and FPUDS.
4140 *
4141 * @param pIemCpu The IEM per CPU data.
4142 * @param pResult The FPU operation result to push.
4143 * @param iEffSeg The effective segment register.
4144 * @param GCPtrEff The effective address relative to @a iEffSeg.
4145 */
4146static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4147{
4148 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4149 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4150 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4151 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4152}
4153
4154
4155/**
4156 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4157 * unless a pending exception prevents it.
4158 *
4159 * @param pIemCpu The IEM per CPU data.
4160 * @param pResult The FPU operation result to store and push.
4161 */
4162static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
4163{
4164 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4165 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4166
4167 /* Update FSW and bail if there are pending exceptions afterwards. */
4168 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4169 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4170 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4171 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4172 {
4173 pCtx->fpu.FSW = fFsw;
4174 return;
4175 }
4176
4177 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4178 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4179 {
4180 /* All is fine, push the actual value. */
4181 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4182 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
4183 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
4184 }
4185 else if (pCtx->fpu.FCW & X86_FCW_IM)
4186 {
4187 /* Masked stack overflow, push QNaN. */
4188 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4189 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4190 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4191 }
4192 else
4193 {
4194 /* Raise stack overflow, don't push anything. */
4195 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4196 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4197 return;
4198 }
4199
4200 fFsw &= ~X86_FSW_TOP_MASK;
4201 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4202 pCtx->fpu.FSW = fFsw;
4203
4204 iemFpuRotateStackPush(pCtx);
4205}
4206
4207
4208/**
4209 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4210 * FOP.
4211 *
4212 * @param pIemCpu The IEM per CPU data.
4213 * @param pResult The result to store.
4214 * @param iStReg Which FPU register to store it in.
4216 */
4217static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4218{
4219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4220 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4221 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4222}
4223
4224
4225/**
4226 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4227 * FOP, and then pops the stack.
4228 *
4229 * @param pIemCpu The IEM per CPU data.
4230 * @param pResult The result to store.
4231 * @param iStReg Which FPU register to store it in.
4233 */
4234static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4235{
4236 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4237 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4238 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4239 iemFpuMaybePopOne(pCtx);
4240}
4241
4242
4243/**
4244 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4245 * FPUDP, and FPUDS.
4246 *
4247 * @param pIemCpu The IEM per CPU data.
4248 * @param pResult The result to store.
4249 * @param iStReg Which FPU register to store it in.
4251 * @param iEffSeg The effective memory operand selector register.
4252 * @param GCPtrEff The effective memory operand offset.
4253 */
4254static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4255{
4256 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4257 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
4258 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4259 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4260}
4261
4262
4263/**
4264 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4265 * FPUDP, and FPUDS, and then pops the stack.
4266 *
4267 * @param pIemCpu The IEM per CPU data.
4268 * @param pResult The result to store.
4269 * @param iStReg Which FPU register to store it in.
4271 * @param iEffSeg The effective memory operand selector register.
4272 * @param GCPtrEff The effective memory operand offset.
4273 */
4274static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4275 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4276{
4277 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4278 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4279 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4280 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4281 iemFpuMaybePopOne(pCtx);
4282}
4283
4284
4285/**
4286 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4287 *
4288 * @param pIemCpu The IEM per CPU data.
4289 */
4290static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4291{
4292 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4293}
4294
4295
4296/**
4297 * Marks the specified stack register as free (for FFREE).
4298 *
4299 * @param pIemCpu The IEM per CPU data.
4300 * @param iStReg The register to free.
4301 */
4302static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4303{
4304 Assert(iStReg < 8);
4305 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4306 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4307 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4308}
4309
4310
4311/**
4312 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4313 *
4314 * @param pIemCpu The IEM per CPU data.
4315 */
4316static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4317{
4318 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4319 uint16_t uFsw = pCtx->fpu.FSW;
4320 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4321 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4322 uFsw &= ~X86_FSW_TOP_MASK;
4323 uFsw |= uTop;
4324 pCtx->fpu.FSW = uFsw;
4325}
4326
4327
4328/**
4329 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4330 *
4331 * @param pIemCpu The IEM per CPU data.
4332 */
4333static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4334{
4335 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4336 uint16_t uFsw = pCtx->fpu.FSW;
4337 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4338 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4339 uFsw &= ~X86_FSW_TOP_MASK;
4340 uFsw |= uTop;
4341 pCtx->fpu.FSW = uFsw;
4342}
4343
4344
4345/**
4346 * Updates the FSW, FOP, FPUIP, and FPUCS.
4347 *
4348 * @param pIemCpu The IEM per CPU data.
4349 * @param u16FSW The FSW from the current instruction.
4350 */
4351static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4352{
4353 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4354 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4355 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4356}
4357
4358
4359/**
4360 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4361 *
4362 * @param pIemCpu The IEM per CPU data.
4363 * @param u16FSW The FSW from the current instruction.
4364 */
4365static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4366{
4367 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4368 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4369 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4370 iemFpuMaybePopOne(pCtx);
4371}
4372
4373
4374/**
4375 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4376 *
4377 * @param pIemCpu The IEM per CPU data.
4378 * @param u16FSW The FSW from the current instruction.
4379 * @param iEffSeg The effective memory operand selector register.
4380 * @param GCPtrEff The effective memory operand offset.
4381 */
4382static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4383{
4384 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4385 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4386 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4387 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4388}
4389
4390
4391/**
4392 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4393 *
4394 * @param pIemCpu The IEM per CPU data.
4395 * @param u16FSW The FSW from the current instruction.
4396 */
4397static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4398{
4399 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4400 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4401 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4402 iemFpuMaybePopOne(pCtx);
4403 iemFpuMaybePopOne(pCtx);
4404}
4405
4406
4407/**
4408 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4409 *
4410 * @param pIemCpu The IEM per CPU data.
4411 * @param u16FSW The FSW from the current instruction.
4412 * @param iEffSeg The effective memory operand selector register.
4413 * @param GCPtrEff The effective memory operand offset.
4414 */
4415static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4416{
4417 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4418 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4419 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4420 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4421 iemFpuMaybePopOne(pCtx);
4422}
4423
4424
4425/**
4426 * Worker routine for raising an FPU stack underflow exception.
4427 *
4428 * @param pIemCpu The IEM per CPU data.
4429 * @param iStReg The stack register being accessed.
4430 * @param pCtx The CPU context.
4431 */
4432static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4433{
4434 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4435 if (pCtx->fpu.FCW & X86_FCW_IM)
4436 {
4437 /* Masked underflow. */
4438 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4439 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4440 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4441 if (iStReg != UINT8_MAX)
4442 {
4443 pCtx->fpu.FTW |= RT_BIT(iReg);
4444 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4445 }
4446 }
4447 else
4448 {
4449 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4450 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4451 }
4452}
4453
4454
4455/**
4456 * Raises a FPU stack underflow exception.
4457 *
4458 * @param pIemCpu The IEM per CPU data.
4459 * @param iStReg The destination register that should be loaded
4460 * with QNaN if \#IS is not masked. Specify
4461 * UINT8_MAX if none (like for fcom).
4462 */
4463DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4464{
4465 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4466 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4467 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4468}
4469
4470
4471DECL_NO_INLINE(static, void)
4472iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4473{
4474 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4475 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4476 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4477 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4478}
4479
4480
4481DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4482{
4483 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4484 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4485 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4486 iemFpuMaybePopOne(pCtx);
4487}
4488
4489
4490DECL_NO_INLINE(static, void)
4491iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4492{
4493 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4494 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4495 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4496 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4497 iemFpuMaybePopOne(pCtx);
4498}
4499
4500
4501DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4502{
4503 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4504 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4505 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4506 iemFpuMaybePopOne(pCtx);
4507 iemFpuMaybePopOne(pCtx);
4508}
4509
4510
4511DECL_NO_INLINE(static, void)
4512iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4513{
4514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4515 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4516
4517 if (pCtx->fpu.FCW & X86_FCW_IM)
4518 {
4519 /* Masked underflow - Push QNaN. */
4520 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4521 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4522 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4523 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4524 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4525 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4526 iemFpuRotateStackPush(pCtx);
4527 }
4528 else
4529 {
4530 /* Exception pending - don't change TOP or the register stack. */
4531 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4532 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4533 }
4534}
4535
4536
4537DECL_NO_INLINE(static, void)
4538iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4539{
4540 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4541 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4542
4543 if (pCtx->fpu.FCW & X86_FCW_IM)
4544 {
4545 /* Masked underflow - Push QNaN. */
4546 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4547 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4548 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4549 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4550 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4551 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4552 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4553 iemFpuRotateStackPush(pCtx);
4554 }
4555 else
4556 {
4557 /* Exception pending - don't change TOP or the register stack. */
4558 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4559 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4560 }
4561}
4562
4563
4564/**
4565 * Worker routine for raising an FPU stack overflow exception on a push.
4566 *
4567 * @param pIemCpu The IEM per CPU data.
4568 * @param pCtx The CPU context.
4569 */
4570static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4571{
4572 if (pCtx->fpu.FCW & X86_FCW_IM)
4573 {
4574 /* Masked overflow. */
4575 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4576 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4577 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4578 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4579 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4580 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4581 iemFpuRotateStackPush(pCtx);
4582 }
4583 else
4584 {
4585 /* Exception pending - don't change TOP or the register stack. */
4586 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4587 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4588 }
4589}
4590
4591
4592/**
4593 * Raises a FPU stack overflow exception on a push.
4594 *
4595 * @param pIemCpu The IEM per CPU data.
4596 */
4597DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4598{
4599 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4600 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4601 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4602}
4603
4604
4605/**
4606 * Raises a FPU stack overflow exception on a push with a memory operand.
4607 *
4608 * @param pIemCpu The IEM per CPU data.
4609 * @param iEffSeg The effective memory operand selector register.
4610 * @param GCPtrEff The effective memory operand offset.
4611 */
4612DECL_NO_INLINE(static, void)
4613iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4614{
4615 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4616 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4617 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4618 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4619}
4620
4621
4622static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4623{
4624 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4625 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4626 if (pCtx->fpu.FTW & RT_BIT(iReg))
4627 return VINF_SUCCESS;
4628 return VERR_NOT_FOUND;
4629}
4630
4631
4632static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4633{
4634 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4635 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4636 if (pCtx->fpu.FTW & RT_BIT(iReg))
4637 {
4638 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4639 return VINF_SUCCESS;
4640 }
4641 return VERR_NOT_FOUND;
4642}
4643
4644
4645static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4646 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4647{
4648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4649 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4650 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4651 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4652 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4653 {
4654 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4655 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4656 return VINF_SUCCESS;
4657 }
4658 return VERR_NOT_FOUND;
4659}
4660
4661
4662static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4663{
4664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4665 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4666 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4667 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4668 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4669 {
4670 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4671 return VINF_SUCCESS;
4672 }
4673 return VERR_NOT_FOUND;
4674}
4675
4676
4677/**
4678 * Updates the FPU exception status after FCW is changed.
4679 *
4680 * @param pCtx The CPU context.
4681 */
4682static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4683{
4684 uint16_t u16Fsw = pCtx->fpu.FSW;
4685 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4686 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4687 else
4688 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4689 pCtx->fpu.FSW = u16Fsw;
4690}
4691
4692
4693/**
4694 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4695 *
4696 * @returns The full FTW.
4697 * @param pCtx The CPU state.
4698 */
4699static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4700{
4701 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4702 uint16_t u16Ftw = 0;
4703 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4704 for (unsigned iSt = 0; iSt < 8; iSt++)
4705 {
4706 unsigned const iReg = (iSt + iTop) & 7;
4707 if (!(u8Ftw & RT_BIT(iReg)))
4708 u16Ftw |= 3 << (iReg * 2); /* empty */
4709 else
4710 {
4711 uint16_t uTag;
4712 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4713 if (pr80Reg->s.uExponent == 0x7fff)
4714 uTag = 2; /* Exponent is all 1's => Special. */
4715 else if (pr80Reg->s.uExponent == 0x0000)
4716 {
4717 if (pr80Reg->s.u64Mantissa == 0x0000)
4718 uTag = 1; /* All bits are zero => Zero. */
4719 else
4720 uTag = 2; /* Must be special. */
4721 }
4722 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4723 uTag = 0; /* Valid. */
4724 else
4725 uTag = 2; /* Must be special. */
4726
4727 u16Ftw |= uTag << (iReg * 2);
4728 }
4729 }
4730
4731 return u16Ftw;
4732}
4733
4734
4735/**
4736 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4737 *
4738 * @returns The compressed FTW.
4739 * @param u16FullFtw The full FTW to convert.
4740 */
4741static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4742{
4743 uint8_t u8Ftw = 0;
4744 for (unsigned i = 0; i < 8; i++)
4745 {
4746 if ((u16FullFtw & 3) != 3 /*empty*/)
4747 u8Ftw |= RT_BIT(i);
4748 u16FullFtw >>= 2;
4749 }
4750
4751 return u8Ftw;
4752}
4753
4754/** @} */
4755
4756
4757/** @name Memory access.
4758 *
4759 * @{
4760 */
4761
4762
4763/**
4764 * Updates the IEMCPU::cbWritten counter if applicable.
4765 *
4766 * @param pIemCpu The IEM per CPU data.
4767 * @param fAccess The access being accounted for.
4768 * @param cbMem The access size.
4769 */
4770DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4771{
4772 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4773 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4774 pIemCpu->cbWritten += (uint32_t)cbMem;
4775}
4776
4777
4778/**
4779 * Checks if the given segment can be written to, raising the appropriate
4780 * exception if not.
4781 *
4782 * @returns VBox strict status code.
4783 *
4784 * @param pIemCpu The IEM per CPU data.
4785 * @param pHid Pointer to the hidden register.
4786 * @param iSegReg The register number.
4787 * @param pu64BaseAddr Where to return the base address to use for the
4788 * segment. (In 64-bit code it may differ from the
4789 * base in the hidden segment.)
4790 */
4791static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
4792{
4793 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4794 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4795 else
4796 {
4797 if (!pHid->Attr.n.u1Present)
4798 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4799
4800 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4801 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4802 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4803 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4804 *pu64BaseAddr = pHid->u64Base;
4805 }
4806 return VINF_SUCCESS;
4807}
4808
4809
4810/**
4811 * Checks if the given segment can be read from, raising the appropriate
4812 * exception if not.
4813 *
4814 * @returns VBox strict status code.
4815 *
4816 * @param pIemCpu The IEM per CPU data.
4817 * @param pHid Pointer to the hidden register.
4818 * @param iSegReg The register number.
4819 * @param pu64BaseAddr Where to return the base address to use for the
4820 * segment. (In 64-bit code it may differ from the
4821 * base in the hidden segment.)
4822 */
4823static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
4824{
4825 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4826 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4827 else
4828 {
4829 if (!pHid->Attr.n.u1Present)
4830 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4831
4832 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4833 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4834 *pu64BaseAddr = pHid->u64Base;
4835 }
4836 return VINF_SUCCESS;
4837}
4838
4839
4840/**
4841 * Applies the segment limit, base and attributes.
4842 *
4843 * This may raise a \#GP or \#SS.
4844 *
4845 * @returns VBox strict status code.
4846 *
4847 * @param pIemCpu The IEM per CPU data.
4848 * @param fAccess The kind of access which is being performed.
4849 * @param iSegReg The index of the segment register to apply.
4850 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4851 * TSS, ++).
4852 * @param pGCPtrMem Pointer to the guest memory address to apply
4853 * segmentation to. Input and output parameter.
4854 */
4855static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4856 size_t cbMem, PRTGCPTR pGCPtrMem)
4857{
4858 if (iSegReg == UINT8_MAX)
4859 return VINF_SUCCESS;
4860
4861 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4862 switch (pIemCpu->enmCpuMode)
4863 {
4864 case IEMMODE_16BIT:
4865 case IEMMODE_32BIT:
4866 {
4867 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4868 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4869
4870 Assert(pSel->Attr.n.u1Present);
4871 Assert(pSel->Attr.n.u1DescType);
4872 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4873 {
4874 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4875 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4876 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4877
4878 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4879 {
4880 /** @todo CPL check. */
4881 }
4882
4883 /*
4884 * There are two kinds of data selectors, normal and expand down.
4885 */
4886 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4887 {
4888 if ( GCPtrFirst32 > pSel->u32Limit
4889 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4890 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4891
4892 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4893 }
4894 else
4895 {
4896 /** @todo implement expand down segments. */
4897 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4898 }
4899 }
4900 else
4901 {
4902
4903 /*
4904 * A code selector can usually be read through; writing is
4905 * only permitted in real and V8086 mode.
4906 */
4907 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4908 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4909 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4910 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4911 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4912
4913 if ( GCPtrFirst32 > pSel->u32Limit
4914 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4915 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4916
4917 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4918 {
4919 /** @todo CPL check. */
4920 }
4921
4922 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4923 }
4924 return VINF_SUCCESS;
4925 }
4926
4927 case IEMMODE_64BIT:
4928 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4929 *pGCPtrMem += pSel->u64Base;
4930 return VINF_SUCCESS;
4931
4932 default:
4933 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4934 }
4935}
4936
4937
4938/**
4939 * Translates a virtual address to a physical address and checks if we
4940 * can access the page as specified.
4941 *
4942 * @param pIemCpu The IEM per CPU data.
4943 * @param GCPtrMem The virtual address.
4944 * @param fAccess The intended access.
4945 * @param pGCPhysMem Where to return the physical address.
4946 */
4947static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4948 PRTGCPHYS pGCPhysMem)
4949{
4950 /** @todo Need a different PGM interface here. We're currently using
4951 * generic / REM interfaces. this won't cut it for R0 & RC. */
4952 RTGCPHYS GCPhys;
4953 uint64_t fFlags;
4954 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4955 if (RT_FAILURE(rc))
4956 {
4957 /** @todo Check unassigned memory in unpaged mode. */
4958 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4959 *pGCPhysMem = NIL_RTGCPHYS;
4960 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4961 }
4962
4963 /* If the page is writable and does not have the no-exec bit set, all
4964 access is allowed. Otherwise we'll have to check more carefully... */
4965 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4966 {
4967 /* Write to read only memory? */
4968 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4969 && !(fFlags & X86_PTE_RW)
4970 && ( pIemCpu->uCpl != 0
4971 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4972 {
4973 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4974 *pGCPhysMem = NIL_RTGCPHYS;
4975 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4976 }
4977
4978 /* Kernel memory accessed by userland? */
4979 if ( !(fFlags & X86_PTE_US)
4980 && pIemCpu->uCpl == 3
4981 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4982 {
4983 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4984 *pGCPhysMem = NIL_RTGCPHYS;
4985 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4986 }
4987
4988 /* Executing non-executable memory? */
4989 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4990 && (fFlags & X86_PTE_PAE_NX)
4991 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4992 {
4993 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4994 *pGCPhysMem = NIL_RTGCPHYS;
4995 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4996 VERR_ACCESS_DENIED);
4997 }
4998 }
4999
5000 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
5001 *pGCPhysMem = GCPhys;
5002 return VINF_SUCCESS;
5003}
5004
5005
5006
5007/**
5008 * Maps a physical page.
5009 *
5010 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
5011 * @param pIemCpu The IEM per CPU data.
5012 * @param GCPhysMem The physical address.
5013 * @param fAccess The intended access.
5014 * @param ppvMem Where to return the mapping address.
5015 * @param pLock The PGM lock.
5016 */
5017static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
5018{
5019#ifdef IEM_VERIFICATION_MODE_FULL
5020 /* Force the alternative path so we can ignore writes. */
5021 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
5022 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5023#endif
5024#ifdef IEM_LOG_MEMORY_WRITES
5025 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5026 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5027#endif
5028#ifdef IEM_VERIFICATION_MODE_MINIMAL
5029 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5030#endif
5031
5032 /** @todo This API may require some improving later. A private deal with PGM
5033 * regarding locking and unlocking needs to be struck. A couple of TLBs
5034 * living in PGM, but with publicly accessible inlined access methods
5035 * could perhaps be an even better solution. */
5036 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
5037 GCPhysMem,
5038 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
5039 pIemCpu->fBypassHandlers,
5040 ppvMem,
5041 pLock);
5042 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
5043 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5044 return rc;
5045}
5046
5047
5048/**
5049 * Unmap a page previously mapped by iemMemPageMap.
5050 *
5051 * @param pIemCpu The IEM per CPU data.
5052 * @param GCPhysMem The physical address.
5053 * @param fAccess The intended access.
5054 * @param pvMem What iemMemPageMap returned.
5055 * @param pLock The PGM lock.
5056 */
5057DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
5058{
5059 NOREF(pIemCpu);
5060 NOREF(GCPhysMem);
5061 NOREF(fAccess);
5062 NOREF(pvMem);
5063 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
5064}
5065
5066
5067/**
5068 * Looks up a memory mapping entry.
5069 *
5070 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5071 * @param pIemCpu The IEM per CPU data.
5072 * @param pvMem The memory address.
5073 * @param   fAccess             The access to match.
5074 */
5075DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5076{
5077 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5078 if ( pIemCpu->aMemMappings[0].pv == pvMem
5079 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5080 return 0;
5081 if ( pIemCpu->aMemMappings[1].pv == pvMem
5082 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5083 return 1;
5084 if ( pIemCpu->aMemMappings[2].pv == pvMem
5085 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5086 return 2;
5087 return VERR_NOT_FOUND;
5088}
5089
5090
5091/**
5092 * Finds a free memmap entry when using iNextMapping doesn't work.
5093 *
5094 * @returns Memory mapping index, 1024 on failure.
5095 * @param pIemCpu The IEM per CPU data.
5096 */
5097static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
5098{
5099 /*
5100 * The easy case.
5101 */
5102 if (pIemCpu->cActiveMappings == 0)
5103 {
5104 pIemCpu->iNextMapping = 1;
5105 return 0;
5106 }
5107
5108 /* There should be enough mappings for all instructions. */
5109 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
5110
5111 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
5112 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5113 return i;
5114
5115 AssertFailedReturn(1024);
5116}
5117
5118
5119/**
5120 * Commits a bounce buffer that needs writing back and unmaps it.
5121 *
5122 * @returns Strict VBox status code.
5123 * @param pIemCpu The IEM per CPU data.
5124 * @param iMemMap The index of the buffer to commit.
5125 */
5126static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
5127{
5128 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5129 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5130
5131 /*
5132 * Do the writing.
5133 */
5134 int rc;
5135#ifndef IEM_VERIFICATION_MODE_MINIMAL
5136 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
5137 && !IEM_VERIFICATION_ENABLED(pIemCpu))
5138 {
5139 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5140 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5141 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5142 if (!pIemCpu->fBypassHandlers)
5143 {
5144 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5145 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5146 pbBuf,
5147 cbFirst);
5148 if (cbSecond && rc == VINF_SUCCESS)
5149 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5150 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5151 pbBuf + cbFirst,
5152 cbSecond);
5153 }
5154 else
5155 {
5156 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5157 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5158 pbBuf,
5159 cbFirst);
5160 if (cbSecond && rc == VINF_SUCCESS)
5161 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5162 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5163 pbBuf + cbFirst,
5164 cbSecond);
5165 }
5166 if (rc != VINF_SUCCESS)
5167 {
5168 /** @todo status code handling */
5169 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5170 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
5171 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5172 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5173 }
5174 }
5175 else
5176#endif
5177 rc = VINF_SUCCESS;
5178
5179#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5180 /*
5181 * Record the write(s).
5182 */
5183 if (!pIemCpu->fNoRem)
5184 {
5185 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5186 if (pEvtRec)
5187 {
5188 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5189 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
5190 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5191 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
5192 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
5193 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5194 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5195 }
5196 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5197 {
5198 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5199 if (pEvtRec)
5200 {
5201 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5202 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
5203 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5204 memcpy(pEvtRec->u.RamWrite.ab,
5205 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
5206 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
5207 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5208 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5209 }
5210 }
5211 }
5212#endif
5213#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
5214 if (rc == VINF_SUCCESS)
5215 {
5216 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5217 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
5218 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5219 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5220 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
5221 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
5222
5223 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5224 g_cbIemWrote = cbWrote;
5225 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5226 }
5227#endif
5228
5229 /*
5230 * Free the mapping entry.
5231 */
5232 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5233 Assert(pIemCpu->cActiveMappings != 0);
5234 pIemCpu->cActiveMappings--;
5235 return rc;
5236}
5237
5238
5239/**
5240 * iemMemMap worker that deals with a request crossing pages.
5241 */
5242static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
5243 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5244{
5245 /*
5246 * Do the address translations.
5247 */
5248 RTGCPHYS GCPhysFirst;
5249 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5250 if (rcStrict != VINF_SUCCESS)
5251 return rcStrict;
5252
5253/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
5254 * last byte. */
5255 RTGCPHYS GCPhysSecond;
5256 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
5257 if (rcStrict != VINF_SUCCESS)
5258 return rcStrict;
5259 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
5260
5261 /*
5262 * Read in the current memory content if it's a read, execute or partial
5263 * write access.
5264 */
5265 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5266 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
5267 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5268
5269 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5270 {
5271 int rc;
5272 if (!pIemCpu->fBypassHandlers)
5273 {
5274 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
5275 if (rc != VINF_SUCCESS)
5276 {
5277 /** @todo status code handling */
5278                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5279 return rc;
5280 }
5281 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5282 if (rc != VINF_SUCCESS)
5283 {
5284 /** @todo status code handling */
5285                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5286 return rc;
5287 }
5288 }
5289 else
5290 {
5291 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5292 if (rc != VINF_SUCCESS)
5293 {
5294 /** @todo status code handling */
5295                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5296 return rc;
5297 }
5298 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5299 if (rc != VINF_SUCCESS)
5300 {
5301 /** @todo status code handling */
5302                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5303 return rc;
5304 }
5305 }
5306
5307#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5308 if ( !pIemCpu->fNoRem
5309 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5310 {
5311 /*
5312 * Record the reads.
5313 */
5314 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5315 if (pEvtRec)
5316 {
5317 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5318 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5319 pEvtRec->u.RamRead.cb = cbFirstPage;
5320 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5321 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5322 }
5323 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5324 if (pEvtRec)
5325 {
5326 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5327 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5328 pEvtRec->u.RamRead.cb = cbSecondPage;
5329 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5330 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5331 }
5332 }
5333#endif
5334 }
5335#ifdef VBOX_STRICT
5336 else
5337 memset(pbBuf, 0xcc, cbMem);
5338#endif
5339#ifdef VBOX_STRICT
5340 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5341 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5342#endif
5343
5344 /*
5345 * Commit the bounce buffer entry.
5346 */
5347 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5348 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5349 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5350 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5351 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5352 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5353 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5354 pIemCpu->cActiveMappings++;
5355
5356 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5357 *ppvMem = pbBuf;
5358 return VINF_SUCCESS;
5359}
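
/* Worked example (illustrative values): a 4 byte access at GCPtrFirst = 0xffe with
 * 4 KB pages is split as
 *      cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
 *      cbSecondPage = cbMem - cbFirstPage                          = 4 - 2          = 2
 * so the last two bytes of the first page and the first two bytes of the page that
 * GCPtrFirst + 3 translates to end up as one contiguous run in the bounce buffer,
 * which is what the caller gets back in *ppvMem.
 */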
5360
5361
5362/**
5363 * iemMemMap worker that deals with iemMemPageMap failures.
5364 */
5365static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5366 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5367{
5368 /*
5369 * Filter out conditions we can handle and the ones which shouldn't happen.
5370 */
5371 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5372 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5373 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5374 {
5375 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5376 return rcMap;
5377 }
5378 pIemCpu->cPotentialExits++;
5379
5380 /*
5381 * Read in the current memory content if it's a read, execute or partial
5382 * write access.
5383 */
5384 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5385 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5386 {
5387 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5388 memset(pbBuf, 0xff, cbMem);
5389 else
5390 {
5391 int rc;
5392 if (!pIemCpu->fBypassHandlers)
5393 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5394 else
5395 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5396 if (rc != VINF_SUCCESS)
5397 {
5398 /** @todo status code handling */
5399 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5400 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5401 return rc;
5402 }
5403 }
5404
5405#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5406 if ( !pIemCpu->fNoRem
5407 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5408 {
5409 /*
5410 * Record the read.
5411 */
5412 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5413 if (pEvtRec)
5414 {
5415 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5416 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5417 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5418 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5419 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5420 }
5421 }
5422#endif
5423 }
5424#ifdef VBOX_STRICT
5425 else
5426 memset(pbBuf, 0xcc, cbMem);
5427#endif
5428#ifdef VBOX_STRICT
5429 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5430 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5431#endif
5432
5433 /*
5434 * Commit the bounce buffer entry.
5435 */
5436 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5437 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5438 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5439 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5440 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5441 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5442 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5443 pIemCpu->cActiveMappings++;
5444
5445 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5446 *ppvMem = pbBuf;
5447 return VINF_SUCCESS;
5448}
5449
5450
5451
5452/**
5453 * Maps the specified guest memory for the given kind of access.
5454 *
5455 * This may be using bounce buffering of the memory if it's crossing a page
5456 * boundary or if there is an access handler installed for any of it. Because
5457 * of lock prefix guarantees, we're in for some extra clutter when this
5458 * happens.
5459 *
5460 * This may raise a \#GP, \#SS, \#PF or \#AC.
5461 *
5462 * @returns VBox strict status code.
5463 *
5464 * @param pIemCpu The IEM per CPU data.
5465 * @param ppvMem Where to return the pointer to the mapped
5466 * memory.
5467 * @param cbMem The number of bytes to map. This is usually 1,
5468 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5469 * string operations it can be up to a page.
5470 * @param iSegReg The index of the segment register to use for
5471 * this access. The base and limits are checked.
5472 * Use UINT8_MAX to indicate that no segmentation
5473 * is required (for IDT, GDT and LDT accesses).
5474 * @param GCPtrMem The address of the guest memory.
5475 * @param   fAccess             How the memory is being accessed.  The
5476 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5477 * how to map the memory, while the
5478 * IEM_ACCESS_WHAT_XXX bit is used when raising
5479 * exceptions.
5480 */
5481static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5482{
5483 /*
5484 * Check the input and figure out which mapping entry to use.
5485 */
5486 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */
5487 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5488
5489 unsigned iMemMap = pIemCpu->iNextMapping;
5490 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5491 {
5492 iMemMap = iemMemMapFindFree(pIemCpu);
5493 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5494 }
5495
5496 /*
5497 * Map the memory, checking that we can actually access it. If something
5498 * slightly complicated happens, fall back on bounce buffering.
5499 */
5500 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5501 if (rcStrict != VINF_SUCCESS)
5502 return rcStrict;
5503
5504 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5505 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5506
5507 RTGCPHYS GCPhysFirst;
5508 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5509 if (rcStrict != VINF_SUCCESS)
5510 return rcStrict;
5511
5512 void *pvMem;
5513 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5514 if (rcStrict != VINF_SUCCESS)
5515 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5516
5517 /*
5518 * Fill in the mapping table entry.
5519 */
5520 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5521 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5522 pIemCpu->iNextMapping = iMemMap + 1;
5523 pIemCpu->cActiveMappings++;
5524
5525 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5526 *ppvMem = pvMem;
5527 return VINF_SUCCESS;
5528}
5529
5530
5531/**
5532 * Commits the guest memory if bounce buffered and unmaps it.
5533 *
5534 * @returns Strict VBox status code.
5535 * @param pIemCpu The IEM per CPU data.
5536 * @param pvMem The mapping.
5537 * @param fAccess The kind of access.
5538 */
5539static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5540{
5541 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5542 AssertReturn(iMemMap >= 0, iMemMap);
5543
5544 /* If it's bounce buffered, we may need to write back the buffer. */
5545 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5546 {
5547 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5548 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5549 }
5550 /* Otherwise unlock it. */
5551 else
5552 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5553
5554 /* Free the entry. */
5555 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5556 Assert(pIemCpu->cActiveMappings != 0);
5557 pIemCpu->cActiveMappings--;
5558 return VINF_SUCCESS;
5559}
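
/* Typical caller pattern for the two functions above; a sketch of what the data
 * store helpers further down do (GCPtrMem and u32Value are hypothetical locals):
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;    - direct mapping or bounce buffer, the caller can't tell
 *          rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;            - exceptions (#PF, #GP, ...) come back as status codes
 */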
5560
5561
5562/**
5563 * Fetches a data byte.
5564 *
5565 * @returns Strict VBox status code.
5566 * @param pIemCpu The IEM per CPU data.
5567 * @param pu8Dst Where to return the byte.
5568 * @param iSegReg The index of the segment register to use for
5569 * this access. The base and limits are checked.
5570 * @param GCPtrMem The address of the guest memory.
5571 */
5572static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5573{
5574 /* The lazy approach for now... */
5575 uint8_t const *pu8Src;
5576 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5577 if (rc == VINF_SUCCESS)
5578 {
5579 *pu8Dst = *pu8Src;
5580 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5581 }
5582 return rc;
5583}
5584
5585
5586/**
5587 * Fetches a data word.
5588 *
5589 * @returns Strict VBox status code.
5590 * @param pIemCpu The IEM per CPU data.
5591 * @param pu16Dst Where to return the word.
5592 * @param iSegReg The index of the segment register to use for
5593 * this access. The base and limits are checked.
5594 * @param GCPtrMem The address of the guest memory.
5595 */
5596static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5597{
5598 /* The lazy approach for now... */
5599 uint16_t const *pu16Src;
5600 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5601 if (rc == VINF_SUCCESS)
5602 {
5603 *pu16Dst = *pu16Src;
5604 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5605 }
5606 return rc;
5607}
5608
5609
5610/**
5611 * Fetches a data dword.
5612 *
5613 * @returns Strict VBox status code.
5614 * @param pIemCpu The IEM per CPU data.
5615 * @param pu32Dst Where to return the dword.
5616 * @param iSegReg The index of the segment register to use for
5617 * this access. The base and limits are checked.
5618 * @param GCPtrMem The address of the guest memory.
5619 */
5620static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5621{
5622 /* The lazy approach for now... */
5623 uint32_t const *pu32Src;
5624 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5625 if (rc == VINF_SUCCESS)
5626 {
5627 *pu32Dst = *pu32Src;
5628 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5629 }
5630 return rc;
5631}
5632
5633
5634#ifdef SOME_UNUSED_FUNCTION
5635/**
5636 * Fetches a data dword and sign extends it to a qword.
5637 *
5638 * @returns Strict VBox status code.
5639 * @param pIemCpu The IEM per CPU data.
5640 * @param pu64Dst Where to return the sign extended value.
5641 * @param iSegReg The index of the segment register to use for
5642 * this access. The base and limits are checked.
5643 * @param GCPtrMem The address of the guest memory.
5644 */
5645static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5646{
5647 /* The lazy approach for now... */
5648 int32_t const *pi32Src;
5649 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5650 if (rc == VINF_SUCCESS)
5651 {
5652 *pu64Dst = *pi32Src;
5653 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5654 }
5655#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5656 else
5657 *pu64Dst = 0;
5658#endif
5659 return rc;
5660}
5661#endif
5662
5663
5664/**
5665 * Fetches a data qword.
5666 *
5667 * @returns Strict VBox status code.
5668 * @param pIemCpu The IEM per CPU data.
5669 * @param pu64Dst Where to return the qword.
5670 * @param iSegReg The index of the segment register to use for
5671 * this access. The base and limits are checked.
5672 * @param GCPtrMem The address of the guest memory.
5673 */
5674static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5675{
5676 /* The lazy approach for now... */
5677 uint64_t const *pu64Src;
5678 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5679 if (rc == VINF_SUCCESS)
5680 {
5681 *pu64Dst = *pu64Src;
5682 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5683 }
5684 return rc;
5685}
5686
5687
5688/**
5689 * Fetches a data tword.
5690 *
5691 * @returns Strict VBox status code.
5692 * @param pIemCpu The IEM per CPU data.
5693 * @param pr80Dst Where to return the tword.
5694 * @param iSegReg The index of the segment register to use for
5695 * this access. The base and limits are checked.
5696 * @param GCPtrMem The address of the guest memory.
5697 */
5698static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5699{
5700 /* The lazy approach for now... */
5701 PCRTFLOAT80U pr80Src;
5702 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5703 if (rc == VINF_SUCCESS)
5704 {
5705 *pr80Dst = *pr80Src;
5706 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5707 }
5708 return rc;
5709}
5710
5711
5712/**
5713 * Fetches a descriptor register (lgdt, lidt).
5714 *
5715 * @returns Strict VBox status code.
5716 * @param pIemCpu The IEM per CPU data.
5717 * @param pcbLimit Where to return the limit.
5718 * @param   pGCPtrBase          Where to return the base.
5719 * @param iSegReg The index of the segment register to use for
5720 * this access. The base and limits are checked.
5721 * @param GCPtrMem The address of the guest memory.
5722 * @param enmOpSize The effective operand size.
5723 */
5724static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5725 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5726{
5727 uint8_t const *pu8Src;
5728 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5729 (void **)&pu8Src,
5730 enmOpSize == IEMMODE_64BIT
5731 ? 2 + 8
5732 : enmOpSize == IEMMODE_32BIT
5733 ? 2 + 4
5734 : 2 + 3,
5735 iSegReg,
5736 GCPtrMem,
5737 IEM_ACCESS_DATA_R);
5738 if (rcStrict == VINF_SUCCESS)
5739 {
5740 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5741 switch (enmOpSize)
5742 {
5743 case IEMMODE_16BIT:
5744 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5745 break;
5746 case IEMMODE_32BIT:
5747 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5748 break;
5749 case IEMMODE_64BIT:
5750 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5751 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5752 break;
5753
5754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5755 }
5756 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5757 }
5758 return rcStrict;
5759}
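
/* Memory layout of the descriptor register operand read above (sketch):
 *
 *      bytes 0..1 : 16-bit limit
 *      bytes 2..  : base address - 24 bits with a 16-bit operand size (2 + 3 bytes mapped),
 *                   32 bits with a 32-bit operand size (2 + 4), 64 bits in long mode (2 + 8).
 *
 * So a 32-bit "lgdt [mem]" reads 6 bytes while the long mode form reads 10, matching the
 * sizes passed to iemMemMap above.
 */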
5760
5761
5762
5763/**
5764 * Stores a data byte.
5765 *
5766 * @returns Strict VBox status code.
5767 * @param pIemCpu The IEM per CPU data.
5768 * @param iSegReg The index of the segment register to use for
5769 * this access. The base and limits are checked.
5770 * @param GCPtrMem The address of the guest memory.
5771 * @param u8Value The value to store.
5772 */
5773static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5774{
5775 /* The lazy approach for now... */
5776 uint8_t *pu8Dst;
5777 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5778 if (rc == VINF_SUCCESS)
5779 {
5780 *pu8Dst = u8Value;
5781 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5782 }
5783 return rc;
5784}
5785
5786
5787/**
5788 * Stores a data word.
5789 *
5790 * @returns Strict VBox status code.
5791 * @param pIemCpu The IEM per CPU data.
5792 * @param iSegReg The index of the segment register to use for
5793 * this access. The base and limits are checked.
5794 * @param GCPtrMem The address of the guest memory.
5795 * @param u16Value The value to store.
5796 */
5797static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5798{
5799 /* The lazy approach for now... */
5800 uint16_t *pu16Dst;
5801 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5802 if (rc == VINF_SUCCESS)
5803 {
5804 *pu16Dst = u16Value;
5805 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5806 }
5807 return rc;
5808}
5809
5810
5811/**
5812 * Stores a data dword.
5813 *
5814 * @returns Strict VBox status code.
5815 * @param pIemCpu The IEM per CPU data.
5816 * @param iSegReg The index of the segment register to use for
5817 * this access. The base and limits are checked.
5818 * @param GCPtrMem The address of the guest memory.
5819 * @param u32Value The value to store.
5820 */
5821static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5822{
5823 /* The lazy approach for now... */
5824 uint32_t *pu32Dst;
5825 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5826 if (rc == VINF_SUCCESS)
5827 {
5828 *pu32Dst = u32Value;
5829 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5830 }
5831 return rc;
5832}
5833
5834
5835/**
5836 * Stores a data qword.
5837 *
5838 * @returns Strict VBox status code.
5839 * @param pIemCpu The IEM per CPU data.
5840 * @param iSegReg The index of the segment register to use for
5841 * this access. The base and limits are checked.
5842 * @param GCPtrMem The address of the guest memory.
5843 * @param u64Value The value to store.
5844 */
5845static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5846{
5847 /* The lazy approach for now... */
5848 uint64_t *pu64Dst;
5849 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5850 if (rc == VINF_SUCCESS)
5851 {
5852 *pu64Dst = u64Value;
5853 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5854 }
5855 return rc;
5856}
5857
5858
5859/**
5860 * Stores a descriptor register (sgdt, sidt).
5861 *
5862 * @returns Strict VBox status code.
5863 * @param pIemCpu The IEM per CPU data.
5864 * @param cbLimit The limit.
5865 * @param   GCPtrBase           The base address.
5866 * @param iSegReg The index of the segment register to use for
5867 * this access. The base and limits are checked.
5868 * @param GCPtrMem The address of the guest memory.
5869 * @param enmOpSize The effective operand size.
5870 */
5871static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
5872 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5873{
5874 uint8_t *pu8Src;
5875 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5876 (void **)&pu8Src,
5877 enmOpSize == IEMMODE_64BIT
5878 ? 2 + 8
5879 : enmOpSize == IEMMODE_32BIT
5880 ? 2 + 4
5881 : 2 + 3,
5882 iSegReg,
5883 GCPtrMem,
5884 IEM_ACCESS_DATA_W);
5885 if (rcStrict == VINF_SUCCESS)
5886 {
5887 pu8Src[0] = RT_BYTE1(cbLimit);
5888 pu8Src[1] = RT_BYTE2(cbLimit);
5889 pu8Src[2] = RT_BYTE1(GCPtrBase);
5890 pu8Src[3] = RT_BYTE2(GCPtrBase);
5891 pu8Src[4] = RT_BYTE3(GCPtrBase);
5892 if (enmOpSize == IEMMODE_16BIT)
5893            pu8Src[5] = 0; /* Note! The 286 stored 0xff here. */
5894 else
5895 {
5896 pu8Src[5] = RT_BYTE4(GCPtrBase);
5897 if (enmOpSize == IEMMODE_64BIT)
5898 {
5899 pu8Src[6] = RT_BYTE5(GCPtrBase);
5900 pu8Src[7] = RT_BYTE6(GCPtrBase);
5901 pu8Src[8] = RT_BYTE7(GCPtrBase);
5902 pu8Src[9] = RT_BYTE8(GCPtrBase);
5903 }
5904 }
5905 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
5906 }
5907 return rcStrict;
5908}
5909
5910
5911/**
5912 * Pushes a word onto the stack.
5913 *
5914 * @returns Strict VBox status code.
5915 * @param pIemCpu The IEM per CPU data.
5916 * @param u16Value The value to push.
5917 */
5918static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5919{
5920    /* Decrement the stack pointer. */
5921 uint64_t uNewRsp;
5922 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5923 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5924
5925 /* Write the word the lazy way. */
5926 uint16_t *pu16Dst;
5927 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5928 if (rc == VINF_SUCCESS)
5929 {
5930 *pu16Dst = u16Value;
5931 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5932 }
5933
5934    /* Commit the new RSP value unless an access handler made trouble. */
5935 if (rc == VINF_SUCCESS)
5936 pCtx->rsp = uNewRsp;
5937
5938 return rc;
5939}
5940
5941
5942/**
5943 * Pushes a dword onto the stack.
5944 *
5945 * @returns Strict VBox status code.
5946 * @param pIemCpu The IEM per CPU data.
5947 * @param u32Value The value to push.
5948 */
5949static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5950{
5951    /* Decrement the stack pointer. */
5952 uint64_t uNewRsp;
5953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5954 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5955
5956    /* Write the dword the lazy way. */
5957 uint32_t *pu32Dst;
5958 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5959 if (rc == VINF_SUCCESS)
5960 {
5961 *pu32Dst = u32Value;
5962 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5963 }
5964
5965    /* Commit the new RSP value unless an access handler made trouble. */
5966 if (rc == VINF_SUCCESS)
5967 pCtx->rsp = uNewRsp;
5968
5969 return rc;
5970}
5971
5972
5973/**
5974 * Pushes a qword onto the stack.
5975 *
5976 * @returns Strict VBox status code.
5977 * @param pIemCpu The IEM per CPU data.
5978 * @param u64Value The value to push.
5979 */
5980static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5981{
5982    /* Decrement the stack pointer. */
5983 uint64_t uNewRsp;
5984 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5985 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5986
5987    /* Write the qword the lazy way. */
5988 uint64_t *pu64Dst;
5989 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5990 if (rc == VINF_SUCCESS)
5991 {
5992 *pu64Dst = u64Value;
5993 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5994 }
5995
5996    /* Commit the new RSP value unless an access handler made trouble. */
5997 if (rc == VINF_SUCCESS)
5998 pCtx->rsp = uNewRsp;
5999
6000 return rc;
6001}
6002
6003
6004/**
6005 * Pops a word from the stack.
6006 *
6007 * @returns Strict VBox status code.
6008 * @param pIemCpu The IEM per CPU data.
6009 * @param pu16Value Where to store the popped value.
6010 */
6011static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
6012{
6013 /* Increment the stack pointer. */
6014 uint64_t uNewRsp;
6015 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6016 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
6017
6018    /* Read the word the lazy way. */
6019 uint16_t const *pu16Src;
6020 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6021 if (rc == VINF_SUCCESS)
6022 {
6023 *pu16Value = *pu16Src;
6024 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6025
6026 /* Commit the new RSP value. */
6027 if (rc == VINF_SUCCESS)
6028 pCtx->rsp = uNewRsp;
6029 }
6030
6031 return rc;
6032}
6033
6034
6035/**
6036 * Pops a dword from the stack.
6037 *
6038 * @returns Strict VBox status code.
6039 * @param pIemCpu The IEM per CPU data.
6040 * @param pu32Value Where to store the popped value.
6041 */
6042static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
6043{
6044 /* Increment the stack pointer. */
6045 uint64_t uNewRsp;
6046 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6047 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
6048
6049    /* Read the dword the lazy way. */
6050 uint32_t const *pu32Src;
6051 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6052 if (rc == VINF_SUCCESS)
6053 {
6054 *pu32Value = *pu32Src;
6055 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6056
6057 /* Commit the new RSP value. */
6058 if (rc == VINF_SUCCESS)
6059 pCtx->rsp = uNewRsp;
6060 }
6061
6062 return rc;
6063}
6064
6065
6066/**
6067 * Pops a qword from the stack.
6068 *
6069 * @returns Strict VBox status code.
6070 * @param pIemCpu The IEM per CPU data.
6071 * @param pu64Value Where to store the popped value.
6072 */
6073static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
6074{
6075 /* Increment the stack pointer. */
6076 uint64_t uNewRsp;
6077 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6078 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
6079
6080    /* Read the qword the lazy way. */
6081 uint64_t const *pu64Src;
6082 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6083 if (rc == VINF_SUCCESS)
6084 {
6085 *pu64Value = *pu64Src;
6086 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6087
6088 /* Commit the new RSP value. */
6089 if (rc == VINF_SUCCESS)
6090 pCtx->rsp = uNewRsp;
6091 }
6092
6093 return rc;
6094}
6095
6096
6097/**
6098 * Pushes a word onto the stack, using a temporary stack pointer.
6099 *
6100 * @returns Strict VBox status code.
6101 * @param pIemCpu The IEM per CPU data.
6102 * @param u16Value The value to push.
6103 * @param pTmpRsp Pointer to the temporary stack pointer.
6104 */
6105static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
6106{
6107    /* Decrement the stack pointer. */
6108 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6109 RTUINT64U NewRsp = *pTmpRsp;
6110 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
6111
6112 /* Write the word the lazy way. */
6113 uint16_t *pu16Dst;
6114 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6115 if (rc == VINF_SUCCESS)
6116 {
6117 *pu16Dst = u16Value;
6118 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6119 }
6120
6121    /* Commit the new RSP value unless an access handler made trouble. */
6122 if (rc == VINF_SUCCESS)
6123 *pTmpRsp = NewRsp;
6124
6125 return rc;
6126}
6127
6128
6129/**
6130 * Pushes a dword onto the stack, using a temporary stack pointer.
6131 *
6132 * @returns Strict VBox status code.
6133 * @param pIemCpu The IEM per CPU data.
6134 * @param u32Value The value to push.
6135 * @param pTmpRsp Pointer to the temporary stack pointer.
6136 */
6137static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
6138{
6139    /* Decrement the stack pointer. */
6140 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6141 RTUINT64U NewRsp = *pTmpRsp;
6142 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
6143
6144    /* Write the dword the lazy way. */
6145 uint32_t *pu32Dst;
6146 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6147 if (rc == VINF_SUCCESS)
6148 {
6149 *pu32Dst = u32Value;
6150 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6151 }
6152
6153    /* Commit the new RSP value unless an access handler made trouble. */
6154 if (rc == VINF_SUCCESS)
6155 *pTmpRsp = NewRsp;
6156
6157 return rc;
6158}
6159
6160
6161/**
6162 * Pushes a qword onto the stack, using a temporary stack pointer.
6163 *
6164 * @returns Strict VBox status code.
6165 * @param pIemCpu The IEM per CPU data.
6166 * @param u64Value The value to push.
6167 * @param pTmpRsp Pointer to the temporary stack pointer.
6168 */
6169static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
6170{
6171    /* Decrement the stack pointer. */
6172 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6173 RTUINT64U NewRsp = *pTmpRsp;
6174 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
6175
6176    /* Write the qword the lazy way. */
6177 uint64_t *pu64Dst;
6178 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6179 if (rc == VINF_SUCCESS)
6180 {
6181 *pu64Dst = u64Value;
6182 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6183 }
6184
6185    /* Commit the new RSP value unless an access handler made trouble. */
6186 if (rc == VINF_SUCCESS)
6187 *pTmpRsp = NewRsp;
6188
6189 return rc;
6190}
6191
6192
6193/**
6194 * Pops a word from the stack, using a temporary stack pointer.
6195 *
6196 * @returns Strict VBox status code.
6197 * @param pIemCpu The IEM per CPU data.
6198 * @param pu16Value Where to store the popped value.
6199 * @param pTmpRsp Pointer to the temporary stack pointer.
6200 */
6201static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
6202{
6203 /* Increment the stack pointer. */
6204 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6205 RTUINT64U NewRsp = *pTmpRsp;
6206 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
6207
6208    /* Read the word the lazy way. */
6209 uint16_t const *pu16Src;
6210 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6211 if (rc == VINF_SUCCESS)
6212 {
6213 *pu16Value = *pu16Src;
6214 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6215
6216 /* Commit the new RSP value. */
6217 if (rc == VINF_SUCCESS)
6218 *pTmpRsp = NewRsp;
6219 }
6220
6221 return rc;
6222}
6223
6224
6225/**
6226 * Pops a dword from the stack, using a temporary stack pointer.
6227 *
6228 * @returns Strict VBox status code.
6229 * @param pIemCpu The IEM per CPU data.
6230 * @param pu32Value Where to store the popped value.
6231 * @param pTmpRsp Pointer to the temporary stack pointer.
6232 */
6233static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
6234{
6235 /* Increment the stack pointer. */
6236 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6237 RTUINT64U NewRsp = *pTmpRsp;
6238 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
6239
6240    /* Read the dword the lazy way. */
6241 uint32_t const *pu32Src;
6242 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6243 if (rc == VINF_SUCCESS)
6244 {
6245 *pu32Value = *pu32Src;
6246 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6247
6248 /* Commit the new RSP value. */
6249 if (rc == VINF_SUCCESS)
6250 *pTmpRsp = NewRsp;
6251 }
6252
6253 return rc;
6254}
6255
6256
6257/**
6258 * Pops a qword from the stack, using a temporary stack pointer.
6259 *
6260 * @returns Strict VBox status code.
6261 * @param pIemCpu The IEM per CPU data.
6262 * @param pu64Value Where to store the popped value.
6263 * @param pTmpRsp Pointer to the temporary stack pointer.
6264 */
6265static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
6266{
6267 /* Increment the stack pointer. */
6268 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6269 RTUINT64U NewRsp = *pTmpRsp;
6270 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
6271
6272    /* Read the qword the lazy way. */
6273 uint64_t const *pu64Src;
6274 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6275 if (rcStrict == VINF_SUCCESS)
6276 {
6277 *pu64Value = *pu64Src;
6278 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6279
6280 /* Commit the new RSP value. */
6281 if (rcStrict == VINF_SUCCESS)
6282 *pTmpRsp = NewRsp;
6283 }
6284
6285 return rcStrict;
6286}
6287
6288
6289/**
6290 * Begin a special stack push (used by interrupts, exceptions and such).
6291 *
6292 * This will raise \#SS or \#PF if appropriate.
6293 *
6294 * @returns Strict VBox status code.
6295 * @param pIemCpu The IEM per CPU data.
6296 * @param cbMem The number of bytes to push onto the stack.
6297 * @param ppvMem Where to return the pointer to the stack memory.
6298 * As with the other memory functions this could be
6299 * direct access or bounce buffered access, so
6300 *                      don't commit registers until the commit call
6301 * succeeds.
6302 * @param puNewRsp Where to return the new RSP value. This must be
6303 * passed unchanged to
6304 * iemMemStackPushCommitSpecial().
6305 */
6306static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6307{
6308 Assert(cbMem < UINT8_MAX);
6309 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6310 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
6311 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6312}
6313
6314
6315/**
6316 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6317 *
6318 * This will update the rSP.
6319 *
6320 * @returns Strict VBox status code.
6321 * @param pIemCpu The IEM per CPU data.
6322 * @param pvMem The pointer returned by
6323 * iemMemStackPushBeginSpecial().
6324 * @param uNewRsp The new RSP value returned by
6325 * iemMemStackPushBeginSpecial().
6326 */
6327static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6328{
6329 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6330 if (rcStrict == VINF_SUCCESS)
6331 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6332 return rcStrict;
6333}
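
/* A sketch of how exception/interrupt dispatch can use the special push API
 * (hypothetical caller building a real-mode style frame, error handling trimmed):
 *
 *      uint16_t *pu16Frame;
 *      uint64_t  uNewRsp;
 *      rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[2] = (uint16_t)fEfl;      - FLAGS
 *      pu16Frame[1] = uOldCs;              - CS
 *      pu16Frame[0] = uOldIp;              - IP
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *
 * RSP is only updated by the commit call, so a #PF raised while building the frame
 * leaves the guest stack pointer untouched.
 */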
6334
6335
6336/**
6337 * Begin a special stack pop (used by iret, retf and such).
6338 *
6339 * This will raise \#SS or \#PF if appropriate.
6340 *
6341 * @returns Strict VBox status code.
6342 * @param pIemCpu The IEM per CPU data.
6343 * @param   cbMem               The number of bytes to pop from the stack.
6344 * @param ppvMem Where to return the pointer to the stack memory.
6345 * @param puNewRsp Where to return the new RSP value. This must be
6346 * passed unchanged to
6347 * iemMemStackPopCommitSpecial() or applied
6348 * manually if iemMemStackPopDoneSpecial() is used.
6349 */
6350static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6351{
6352 Assert(cbMem < UINT8_MAX);
6353 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6354 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
6355 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6356}
6357
6358
6359/**
6360 * Continue a special stack pop (used by iret and retf).
6361 *
6362 * This will raise \#SS or \#PF if appropriate.
6363 *
6364 * @returns Strict VBox status code.
6365 * @param pIemCpu The IEM per CPU data.
6366 * @param   cbMem               The number of bytes to pop from the stack.
6367 * @param ppvMem Where to return the pointer to the stack memory.
6368 * @param puNewRsp Where to return the new RSP value. This must be
6369 * passed unchanged to
6370 * iemMemStackPopCommitSpecial() or applied
6371 * manually if iemMemStackPopDoneSpecial() is used.
6372 */
6373static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6374{
6375 Assert(cbMem < UINT8_MAX);
6376 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6377 RTUINT64U NewRsp;
6378 NewRsp.u = *puNewRsp;
6379 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
6380 *puNewRsp = NewRsp.u;
6381 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6382}
6383
6384
6385/**
6386 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6387 *
6388 * This will update the rSP.
6389 *
6390 * @returns Strict VBox status code.
6391 * @param pIemCpu The IEM per CPU data.
6392 * @param pvMem The pointer returned by
6393 * iemMemStackPopBeginSpecial().
6394 * @param uNewRsp The new RSP value returned by
6395 * iemMemStackPopBeginSpecial().
6396 */
6397static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6398{
6399 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6400 if (rcStrict == VINF_SUCCESS)
6401 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6402 return rcStrict;
6403}
6404
6405
6406/**
6407 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6408 * iemMemStackPopContinueSpecial).
6409 *
6410 * The caller will manually commit the rSP.
6411 *
6412 * @returns Strict VBox status code.
6413 * @param pIemCpu The IEM per CPU data.
6414 * @param pvMem The pointer returned by
6415 * iemMemStackPopBeginSpecial() or
6416 * iemMemStackPopContinueSpecial().
6417 */
6418static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6419{
6420 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6421}
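
/* The pop side mirrors this; a sketch of an iret-like sequence (hypothetical caller):
 *
 *      uint16_t const *pu16Frame;
 *      uint64_t        uNewRsp;
 *      rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint16_t uNewIp    = pu16Frame[0];
 *      uint16_t uNewCs    = pu16Frame[1];
 *      uint16_t uNewFlags = pu16Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Frame);
 *      ... validate the popped values, then commit CS:IP, FLAGS and RSP manually ...
 *
 * iemMemStackPopCommitSpecial() can be used instead of the done call when RSP should
 * simply be advanced to the value returned by the begin call.
 */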
6422
6423
6424/**
6425 * Fetches a system table dword.
6426 *
6427 * @returns Strict VBox status code.
6428 * @param pIemCpu The IEM per CPU data.
6429 * @param pu32Dst Where to return the dword.
6430 * @param iSegReg The index of the segment register to use for
6431 * this access. The base and limits are checked.
6432 * @param GCPtrMem The address of the guest memory.
6433 */
6434static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6435{
6436 /* The lazy approach for now... */
6437 uint32_t const *pu32Src;
6438 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6439 if (rc == VINF_SUCCESS)
6440 {
6441 *pu32Dst = *pu32Src;
6442 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6443 }
6444 return rc;
6445}
6446
6447
6448/**
6449 * Fetches a system table qword.
6450 *
6451 * @returns Strict VBox status code.
6452 * @param pIemCpu The IEM per CPU data.
6453 * @param pu64Dst Where to return the qword.
6454 * @param iSegReg The index of the segment register to use for
6455 * this access. The base and limits are checked.
6456 * @param GCPtrMem The address of the guest memory.
6457 */
6458static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6459{
6460 /* The lazy approach for now... */
6461 uint64_t const *pu64Src;
6462 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6463 if (rc == VINF_SUCCESS)
6464 {
6465 *pu64Dst = *pu64Src;
6466 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6467 }
6468 return rc;
6469}
6470
6471
6472/**
6473 * Fetches a descriptor table entry.
6474 *
6475 * @returns Strict VBox status code.
6476 * @param pIemCpu The IEM per CPU.
6477 * @param pDesc Where to return the descriptor table entry.
6478 * @param uSel The selector which table entry to fetch.
6479 */
6480static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6481{
6482 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6483
6484 /** @todo did the 286 require all 8 bytes to be accessible? */
6485 /*
6486 * Get the selector table base and check bounds.
6487 */
6488 RTGCPTR GCPtrBase;
6489 if (uSel & X86_SEL_LDT)
6490 {
6491 if ( !pCtx->ldtr.Attr.n.u1Present
6492 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6493 {
6494 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6495 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6496 /** @todo is this the right exception? */
6497 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6498 }
6499
6500 Assert(pCtx->ldtr.Attr.n.u1Present);
6501 GCPtrBase = pCtx->ldtr.u64Base;
6502 }
6503 else
6504 {
6505 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6506 {
6507 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6508 /** @todo is this the right exception? */
6509 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6510 }
6511 GCPtrBase = pCtx->gdtr.pGdt;
6512 }
6513
6514 /*
6515 * Read the legacy descriptor and maybe the long mode extensions if
6516 * required.
6517 */
6518 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6519 if (rcStrict == VINF_SUCCESS)
6520 {
6521 if ( !IEM_IS_LONG_MODE(pIemCpu)
6522 || pDesc->Legacy.Gen.u1DescType)
6523 pDesc->Long.au64[1] = 0;
6524 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6525 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6526 else
6527 {
6528 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6529 /** @todo is this the right exception? */
6530 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6531 }
6532 }
6533 return rcStrict;
6534}
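
/* Selector layout reminder for the bounds checks above (sketch): bits 0-1 are the RPL,
 * bit 2 (X86_SEL_LDT) selects the LDT instead of the GDT, and bits 3-15 index the table,
 * so (uSel & X86_SEL_MASK) is the byte offset of the 8-byte entry.  E.g. uSel = 0x002b:
 * RPL 3, TI = 0, GDT entry 5 at byte offset 0x28.
 */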
6535
6536
6537/**
6538 * Fakes a long mode stack selector for SS = 0.
6539 *
6540 * @param pDescSs Where to return the fake stack descriptor.
6541 * @param uDpl The DPL we want.
6542 */
6543static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6544{
6545 pDescSs->Long.au64[0] = 0;
6546 pDescSs->Long.au64[1] = 0;
6547 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6548 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6549 pDescSs->Long.Gen.u2Dpl = uDpl;
6550 pDescSs->Long.Gen.u1Present = 1;
6551 pDescSs->Long.Gen.u1Long = 1;
6552}
6553
6554
6555/**
6556 * Marks the selector descriptor as accessed (only non-system descriptors).
6557 *
6558 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6559 * will therefore skip the limit checks.
6560 *
6561 * @returns Strict VBox status code.
6562 * @param pIemCpu The IEM per CPU.
6563 * @param uSel The selector.
6564 */
6565static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6566{
6567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6568
6569 /*
6570 * Get the selector table base and calculate the entry address.
6571 */
6572 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6573 ? pCtx->ldtr.u64Base
6574 : pCtx->gdtr.pGdt;
6575 GCPtr += uSel & X86_SEL_MASK;
6576
6577 /*
6578 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6579     * ugly stuff to avoid this.  This will make sure it's an atomic access as
6580     * well as more or less remove any question about 8-bit or 32-bit accesses.
6581 */
6582 VBOXSTRICTRC rcStrict;
6583 uint32_t volatile *pu32;
6584 if ((GCPtr & 3) == 0)
6585 {
6586 /* The normal case, map the 32-bit bits around the accessed bit (40). */
6587 GCPtr += 2 + 2;
6588 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6589 if (rcStrict != VINF_SUCCESS)
6590 return rcStrict;
6591        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6592 }
6593 else
6594 {
6595 /* The misaligned GDT/LDT case, map the whole thing. */
6596 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6597 if (rcStrict != VINF_SUCCESS)
6598 return rcStrict;
6599 switch ((uintptr_t)pu32 & 3)
6600 {
6601 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6602 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6603 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6604 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6605 }
6606 }
6607
6608 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6609}
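
/* Why bit 40 (sketch): in a legacy descriptor the type field occupies bits 40-43 and
 * X86_SEL_TYPE_ACCESSED is its lowest bit, i.e. absolute bit 40 of the 8-byte entry.
 * The aligned path above maps only bytes 4 thru 7, where that same bit is bit 8 of the
 * 32-bit word (bits 0-7 being the u8BaseHigh1 byte); the misaligned path maps all 8
 * bytes and picks the byte/bit to set from the mapping's actual alignment.
 */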
6610
6611/** @} */
6612
6613
6614/*
6615 * Include the C/C++ implementation of instructions.
6616 */
6617#include "IEMAllCImpl.cpp.h"
6618
6619
6620
6621/** @name "Microcode" macros.
6622 *
6623 * The idea is that we should be able to use the same code to interpret
6624 * instructions as well as to drive a recompiler.  Thus this obfuscation.
6625 *
6626 * @{
6627 */
6628#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6629#define IEM_MC_END() }
6630#define IEM_MC_PAUSE() do {} while (0)
6631#define IEM_MC_CONTINUE() do {} while (0)
6632
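/* A sketch of how an instruction body reads when written with these macros
 * (hypothetical fragment; the instruction decoder include uses the same pattern):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      ... operate on u16Value and store it back ...
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * Interpreted, the macros expand to the plain C defined below; a recompiler could
 * redefine them to emit code instead, which is the point of the obfuscation.
 */
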
6633/** Internal macro. */
6634#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6635 do \
6636 { \
6637 VBOXSTRICTRC rcStrict2 = a_Expr; \
6638 if (rcStrict2 != VINF_SUCCESS) \
6639 return rcStrict2; \
6640 } while (0)
6641
6642#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6643#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6644#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6645#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6646#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6647#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6648#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6649
6650#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6651#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6652 do { \
6653 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6654 return iemRaiseDeviceNotAvailable(pIemCpu); \
6655 } while (0)
6656#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6657 do { \
6658 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6659 return iemRaiseMathFault(pIemCpu); \
6660 } while (0)
6661#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6662 do { \
6663 if (pIemCpu->uCpl != 0) \
6664 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6665 } while (0)
6666
6667
6668#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6669#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6670#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6671#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6672#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6673#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6674#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6675 uint32_t a_Name; \
6676 uint32_t *a_pName = &a_Name
6677#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6678 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6679
6680#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6681#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6682
6683#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6684#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6685#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6686#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6687#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6688#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6689#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6690#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6691#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6692#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6693#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6694#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6695#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6696#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6697#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6698#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6699#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6700#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6701#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6702#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6703#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6704#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6705#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6706#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6707#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6708#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6709#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6710#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6711#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6712/** @note Not for IOPL or IF testing or modification. */
6713#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6714#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6715#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6716#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6717
6718#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6719#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6720#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6721#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6722#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6723#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6724#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6725#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6726#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6727#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6728#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6729 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
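/* Note: the 32-bit store and arithmetic variants here write the destination
 * through a uint64_t pointer (or zero pu32Reg[1]) because a 32-bit operand
 * size in long mode zero-extends the result into the full 64-bit register.
 * Rough illustration of the guest-visible effect:
 *
 *      RAX before:                                     0xdeadbeef00000001
 *      after IEM_MC_STORE_GREG_U32(X86_GREG_xAX, 2):   0x0000000000000002
 */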
6730
6731#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6732#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6733/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6734 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6735#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6736#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6737/** @note Not for IOPL or IF testing or modification. */
6738#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6739
6740#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6741#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6742#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6743 do { \
6744 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6745 *pu32Reg += (a_u32Value); \
6746         pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6747 } while (0)
6748#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6749
6750#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6751#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6752#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6753 do { \
6754 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6755 *pu32Reg -= (a_u32Value); \
6756         pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6757 } while (0)
6758#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6759
6760#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6761#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6762#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6763#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6764#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6765#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6766#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6767
6768#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6769#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6770#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6771#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6772
6773#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6774#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6775#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6776
6777#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6778#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6779
6780#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6781#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6782#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6783
6784#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6785#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6786#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6787
6788#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6789
6790#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6791
6792#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6793#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6794#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6795 do { \
6796 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6797 *pu32Reg &= (a_u32Value); \
6798         pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6799 } while (0)
6800#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6801
6802#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6803#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6804#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6805 do { \
6806 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6807 *pu32Reg |= (a_u32Value); \
6808         pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6809 } while (0)
6810#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6811
6812
6813/** @note Not for IOPL or IF modification. */
6814#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6815/** @note Not for IOPL or IF modification. */
6816#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6817/** @note Not for IOPL or IF modification. */
6818#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6819
6820#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6821
6822
6823#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6824 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6825#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6827#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6829
6830#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6832#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6834#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6836
6837#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6839#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6841#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6842 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6843
6844#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6846
6847#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6848 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6849#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6850 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6851
6852#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6853 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6854#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6855 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6856#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6858
6859
6860#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6861 do { \
6862 uint8_t u8Tmp; \
6863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6864 (a_u16Dst) = u8Tmp; \
6865 } while (0)
6866#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6867 do { \
6868 uint8_t u8Tmp; \
6869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6870 (a_u32Dst) = u8Tmp; \
6871 } while (0)
6872#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6873 do { \
6874 uint8_t u8Tmp; \
6875 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6876 (a_u64Dst) = u8Tmp; \
6877 } while (0)
6878#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6879 do { \
6880 uint16_t u16Tmp; \
6881 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6882 (a_u32Dst) = u16Tmp; \
6883 } while (0)
6884#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6885 do { \
6886 uint16_t u16Tmp; \
6887 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6888 (a_u64Dst) = u16Tmp; \
6889 } while (0)
6890#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6891 do { \
6892 uint32_t u32Tmp; \
6893 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6894 (a_u64Dst) = u32Tmp; \
6895 } while (0)
6896
6897#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6898 do { \
6899 uint8_t u8Tmp; \
6900 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6901 (a_u16Dst) = (int8_t)u8Tmp; \
6902 } while (0)
6903#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6904 do { \
6905 uint8_t u8Tmp; \
6906 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6907 (a_u32Dst) = (int8_t)u8Tmp; \
6908 } while (0)
6909#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6910 do { \
6911 uint8_t u8Tmp; \
6912 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6913 (a_u64Dst) = (int8_t)u8Tmp; \
6914 } while (0)
6915#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6916 do { \
6917 uint16_t u16Tmp; \
6918 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6919 (a_u32Dst) = (int16_t)u16Tmp; \
6920 } while (0)
6921#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6922 do { \
6923 uint16_t u16Tmp; \
6924 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6925 (a_u64Dst) = (int16_t)u16Tmp; \
6926 } while (0)
6927#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6928 do { \
6929 uint32_t u32Tmp; \
6930 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6931 (a_u64Dst) = (int32_t)u32Tmp; \
6932 } while (0)
6933
6934#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6935 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6936#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6937 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6938#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6939 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6940#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6941 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6942
6943#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6944 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6945#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6946 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6947#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6948 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6949#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6950 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6951
6952#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6953#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6954#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6955#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6956#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6957#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6958#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6959 do { \
6960 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6961 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6962 } while (0)
6963
6964
6965#define IEM_MC_PUSH_U16(a_u16Value) \
6966 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6967#define IEM_MC_PUSH_U32(a_u32Value) \
6968 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6969#define IEM_MC_PUSH_U64(a_u64Value) \
6970 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6971
6972#define IEM_MC_POP_U16(a_pu16Value) \
6973 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6974#define IEM_MC_POP_U32(a_pu32Value) \
6975 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6976#define IEM_MC_POP_U64(a_pu64Value) \
6977 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
6978
6979/** Maps guest memory for direct or bounce buffered access.
6980 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6981 * @remarks May return.
6982 */
6983#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6984 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6985
6986/** Maps guest memory for direct or bounce buffered access.
6987 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6988 * @remarks May return.
6989 */
6990#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6991 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6992
6993/** Commits the memory and unmaps the guest memory.
6994 * @remarks May return.
6995 */
6996#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6997 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
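/* Illustrative sketch (informational only): a read-modify-write memory operand
 * is typically mapped, handed to the worker, and then committed.  The names
 * below are placeholders for locals/args declared by the individual decoder;
 * IEM_ACCESS_DATA_RW is the usual access flag for this pattern:
 *
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */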
6998
6999/** Commits the memory and unmaps the guest memory unless the FPU status word
7000 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
7001 * exception that would cause FLD not to store its result.
7002 *
7003 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
7004 * store, while \#P will not.
7005 *
7006 * @remarks May in theory return - for now.
7007 */
7008#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
7009 do { \
7010 if ( !(a_u16FSW & X86_FSW_ES) \
7011 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
7012 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
7013 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
7014 } while (0)
7015
7016/** Calculate effective address from R/M. */
7017#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
7018 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
7019
7020#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
7021#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
7022#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
7023#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
7024#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
7025#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
7026#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
7027
7028/**
7029 * Defers the rest of the instruction emulation to a C implementation routine
7030 * and returns, only taking the standard parameters.
7031 *
7032 * @param a_pfnCImpl The pointer to the C routine.
7033 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7034 */
7035#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7036
7037/**
7038 * Defers the rest of the instruction emulation to a C implementation routine and
7039 * returns, taking one argument in addition to the standard ones.
7040 *
7041 * @param a_pfnCImpl The pointer to the C routine.
7042 * @param a0 The argument.
7043 */
7044#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7045
7046/**
7047 * Defers the rest of the instruction emulation to a C implementation routine
7048 * and returns, taking two arguments in addition to the standard ones.
7049 *
7050 * @param a_pfnCImpl The pointer to the C routine.
7051 * @param a0 The first extra argument.
7052 * @param a1 The second extra argument.
7053 */
7054#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7055
7056/**
7057 * Defers the rest of the instruction emulation to a C implementation routine
7058 * and returns, taking three arguments in addition to the standard ones.
7059 *
7060 * @param a_pfnCImpl The pointer to the C routine.
7061 * @param a0 The first extra argument.
7062 * @param a1 The second extra argument.
7063 * @param a2 The third extra argument.
7064 */
7065#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7066
7067/**
7068 * Defers the rest of the instruction emulation to a C implementation routine
7069 * and returns, taking five arguments in addition to the standard ones.
7070 *
7071 * @param a_pfnCImpl The pointer to the C routine.
7072 * @param a0 The first extra argument.
7073 * @param a1 The second extra argument.
7074 * @param a2 The third extra argument.
7075 * @param a3 The fourth extra argument.
7076 * @param a4 The fifth extra argument.
7077 */
7078#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
7079
7080/**
7081 * Defers the entire instruction emulation to a C implementation routine and
7082 * returns, only taking the standard parameters.
7083 *
7084 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7085 *
7086 * @param a_pfnCImpl The pointer to the C routine.
7087 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7088 */
7089#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7090
7091/**
7092 * Defers the entire instruction emulation to a C implementation routine and
7093 * returns, taking one argument in addition to the standard ones.
7094 *
7095 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7096 *
7097 * @param a_pfnCImpl The pointer to the C routine.
7098 * @param a0 The argument.
7099 */
7100#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7101
7102/**
7103 * Defers the entire instruction emulation to a C implementation routine and
7104 * returns, taking two arguments in addition to the standard ones.
7105 *
7106 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7107 *
7108 * @param a_pfnCImpl The pointer to the C routine.
7109 * @param a0 The first extra argument.
7110 * @param a1 The second extra argument.
7111 */
7112#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7113
7114/**
7115 * Defers the entire instruction emulation to a C implementation routine and
7116 * returns, taking three arguments in addition to the standard ones.
7117 *
7118 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7119 *
7120 * @param a_pfnCImpl The pointer to the C routine.
7121 * @param a0 The first extra argument.
7122 * @param a1 The second extra argument.
7123 * @param a2 The third extra argument.
7124 */
7125#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7126
7127/**
7128 * Calls a FPU assembly implementation taking one visible argument.
7129 *
7130 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7131 * @param a0 The first extra argument.
7132 */
7133#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
7134 do { \
7135 iemFpuPrepareUsage(pIemCpu); \
7136 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
7137 } while (0)
7138
7139/**
7140 * Calls a FPU assembly implementation taking two visible arguments.
7141 *
7142 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7143 * @param a0 The first extra argument.
7144 * @param a1 The second extra argument.
7145 */
7146#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
7147 do { \
7148 iemFpuPrepareUsage(pIemCpu); \
7149 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7150 } while (0)
7151
7152/**
7153 * Calls a FPU assembly implementation taking three visible arguments.
7154 *
7155 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7156 * @param a0 The first extra argument.
7157 * @param a1 The second extra argument.
7158 * @param a2 The third extra argument.
7159 */
7160#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7161 do { \
7162 iemFpuPrepareUsage(pIemCpu); \
7163 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7164 } while (0)
7165
7166#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
7167 do { \
7168 (a_FpuData).FSW = (a_FSW); \
7169 (a_FpuData).r80Result = *(a_pr80Value); \
7170 } while (0)
7171
7172/** Pushes FPU result onto the stack. */
7173#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
7174 iemFpuPushResult(pIemCpu, &a_FpuData)
7175/** Pushes FPU result onto the stack and sets the FPUDP. */
7176#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
7177 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
7178
7179/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
7180#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
7181 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
7182
7183/** Stores FPU result in a stack register. */
7184#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
7185 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
7186/** Stores FPU result in a stack register and pops the stack. */
7187#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
7188 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
7189/** Stores FPU result in a stack register and sets the FPUDP. */
7190#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7191 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7192/** Stores FPU result in a stack register, sets the FPUDP, and pops the
7193 * stack. */
7194#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7195 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
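/* Illustrative sketch (informational only): once the worker has filled in an
 * IEMFPURESULT (here FpuRes), a load-style instruction typically does
 *
 *      IEM_MC_SET_FPU_RESULT(FpuRes, u16Fsw, pr80Value);
 *      IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, iEffSeg, GCPtrEffSrc);
 *
 * while register-to-register forms use IEM_MC_STORE_FPU_RESULT[_THEN_POP]
 * instead of the push variants.  The names are placeholders for locals and
 * args declared by the individual decoder.
 */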
7196
7197/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
7198#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
7199 iemFpuUpdateOpcodeAndIp(pIemCpu)
7200/** Free a stack register (for FFREE and FFREEP). */
7201#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
7202 iemFpuStackFree(pIemCpu, a_iStReg)
7203/** Increment the FPU stack pointer. */
7204#define IEM_MC_FPU_STACK_INC_TOP() \
7205 iemFpuStackIncTop(pIemCpu)
7206/** Decrement the FPU stack pointer. */
7207#define IEM_MC_FPU_STACK_DEC_TOP() \
7208 iemFpuStackDecTop(pIemCpu)
7209
7210/** Updates the FSW, FOP, FPUIP, and FPUCS. */
7211#define IEM_MC_UPDATE_FSW(a_u16FSW) \
7212 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7213/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
7214#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
7215 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7216/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
7217#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7218 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7219/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
7220#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
7221 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
7222/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
7223 * stack. */
7224#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7225 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7226/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
7227#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
7228 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
7229
7230/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
7231#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
7232 iemFpuStackUnderflow(pIemCpu, a_iStDst)
7233/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7234 * stack. */
7235#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
7236 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
7237/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7238 * FPUDS. */
7239#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7240 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7241/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7242 * FPUDS. Pops stack. */
7243#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
7244 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
7245/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7246 * stack twice. */
7247#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
7248 iemFpuStackUnderflowThenPopPop(pIemCpu)
7249/** Raises a FPU stack underflow exception for an instruction pushing a result
7250 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
7251#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
7252 iemFpuStackPushUnderflow(pIemCpu)
7253/** Raises a FPU stack underflow exception for an instruction pushing a result
7254 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
7255#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
7256 iemFpuStackPushUnderflowTwo(pIemCpu)
7257
7258/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7259 * FPUIP, FPUCS and FOP. */
7260#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
7261 iemFpuStackPushOverflow(pIemCpu)
7262/** Raises a FPU stack overflow exception as part of a push attempt. Sets
7263 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
7264#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
7265 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
7266/** Indicates that we (might) have modified the FPU state. */
7267#define IEM_MC_USED_FPU() \
7268 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
7269
7270/** @note Not for IOPL or IF testing. */
7271#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
7272/** @note Not for IOPL or IF testing. */
7273#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
7274/** @note Not for IOPL or IF testing. */
7275#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
7276/** @note Not for IOPL or IF testing. */
7277#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
7278/** @note Not for IOPL or IF testing. */
7279#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
7280 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7281 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7282/** @note Not for IOPL or IF testing. */
7283#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7284 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7285 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7286/** @note Not for IOPL or IF testing. */
7287#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7288 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7289 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7290 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7291/** @note Not for IOPL or IF testing. */
7292#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7293 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7294 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7295 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7296#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7297#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7298#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7299/** @note Not for IOPL or IF testing. */
7300#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7301 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7302 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7303/** @note Not for IOPL or IF testing. */
7304#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7305 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7306 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7307/** @note Not for IOPL or IF testing. */
7308#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7309 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7310 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7311/** @note Not for IOPL or IF testing. */
7312#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7313 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7314 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7315/** @note Not for IOPL or IF testing. */
7316#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7317 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7318 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7319/** @note Not for IOPL or IF testing. */
7320#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7321 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7322 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7323#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7324#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7325#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7326 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7327#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7328 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7329#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7330 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7331#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7332 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7333#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7334 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7335#define IEM_MC_IF_FCW_IM() \
7336 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7337
7338#define IEM_MC_ELSE() } else {
7339#define IEM_MC_ENDIF() } do {} while (0)
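/* Note how the IEM_MC_IF_* macros above deliberately leave an opening brace
 * dangling; IEM_MC_ELSE and IEM_MC_ENDIF supply the matching close, so a body
 * using them reads like structured code.  Illustrative sketch (informational
 * only, names are placeholders) of an FPU arithmetic body put together from
 * the macros above:
 *
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_USED_FPU();
 *      IEM_MC_ADVANCE_RIP();
 */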
7340
7341/** @} */
7342
7343
7344/** @name Opcode Debug Helpers.
7345 * @{
7346 */
7347#ifdef DEBUG
7348# define IEMOP_MNEMONIC(a_szMnemonic) \
7349 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7350 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7351# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7352 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7353 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7354#else
7355# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7356# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7357#endif
7358
7359/** @} */
7360
7361
7362/** @name Opcode Helpers.
7363 * @{
7364 */
7365
7366/** The instruction raises an \#UD in real and V8086 mode. */
7367#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7368 do \
7369 { \
7370 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7371 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7372 } while (0)
7373
7374/** The instruction allows no lock prefixing (in this encoding); raises \#UD if
7375 * lock prefixed.
7376 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7377#define IEMOP_HLP_NO_LOCK_PREFIX() \
7378 do \
7379 { \
7380 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7381 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7382 } while (0)
7383
7384/** The instruction is not available in 64-bit mode; raises \#UD if we're in
7385 * 64-bit mode. */
7386#define IEMOP_HLP_NO_64BIT() \
7387 do \
7388 { \
7389 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7390 return IEMOP_RAISE_INVALID_OPCODE(); \
7391 } while (0)
7392
7393/** The instruction defaults to 64-bit operand size if 64-bit mode. */
7394#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7395 do \
7396 { \
7397 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7398 iemRecalEffOpSize64Default(pIemCpu); \
7399 } while (0)
7400
7401/** The instruction has 64-bit operand size if 64-bit mode. */
7402#define IEMOP_HLP_64BIT_OP_SIZE() \
7403 do \
7404 { \
7405 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7406 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7407 } while (0)
7408
7409/**
7410 * Done decoding.
7411 */
7412#define IEMOP_HLP_DONE_DECODING() \
7413 do \
7414 { \
7415 /*nothing for now, maybe later... */ \
7416 } while (0)
7417
7418/**
7419 * Done decoding, raise \#UD exception if lock prefix present.
7420 */
7421#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7422 do \
7423 { \
7424 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7425 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7426 } while (0)
7427
7428
7429/**
7430 * Calculates the effective address of a ModR/M memory operand.
7431 *
7432 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7433 *
7434 * @return Strict VBox status code.
7435 * @param pIemCpu The IEM per CPU data.
7436 * @param bRm The ModRM byte.
7437 * @param cbImm The size of any immediate following the
7438 * effective address opcode bytes. Important for
7439 * RIP relative addressing.
7440 * @param pGCPtrEff Where to return the effective address.
7441 */
7442static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
7443{
7444 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7445 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7446#define SET_SS_DEF() \
7447 do \
7448 { \
7449 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7450 pIemCpu->iEffSeg = X86_SREG_SS; \
7451 } while (0)
7452
7453 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
7454 {
7455/** @todo Check the effective address size crap! */
7456 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
7457 {
7458 uint16_t u16EffAddr;
7459
7460 /* Handle the disp16 form with no registers first. */
7461 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7462 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7463 else
7464 {
7465                 /* Get the displacement. */
7466 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7467 {
7468 case 0: u16EffAddr = 0; break;
7469 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7470 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7471 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7472 }
7473
7474 /* Add the base and index registers to the disp. */
7475 switch (bRm & X86_MODRM_RM_MASK)
7476 {
7477 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7478 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7479 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7480 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7481 case 4: u16EffAddr += pCtx->si; break;
7482 case 5: u16EffAddr += pCtx->di; break;
7483 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7484 case 7: u16EffAddr += pCtx->bx; break;
7485 }
7486 }
7487
7488 *pGCPtrEff = u16EffAddr;
7489 }
7490 else
7491 {
7492 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
7493 uint32_t u32EffAddr;
7494
7495 /* Handle the disp32 form with no registers first. */
7496 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7497 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7498 else
7499 {
7500 /* Get the register (or SIB) value. */
7501 switch ((bRm & X86_MODRM_RM_MASK))
7502 {
7503 case 0: u32EffAddr = pCtx->eax; break;
7504 case 1: u32EffAddr = pCtx->ecx; break;
7505 case 2: u32EffAddr = pCtx->edx; break;
7506 case 3: u32EffAddr = pCtx->ebx; break;
7507 case 4: /* SIB */
7508 {
7509 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7510
7511 /* Get the index and scale it. */
7512 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7513 {
7514 case 0: u32EffAddr = pCtx->eax; break;
7515 case 1: u32EffAddr = pCtx->ecx; break;
7516 case 2: u32EffAddr = pCtx->edx; break;
7517 case 3: u32EffAddr = pCtx->ebx; break;
7518 case 4: u32EffAddr = 0; /*none */ break;
7519 case 5: u32EffAddr = pCtx->ebp; break;
7520 case 6: u32EffAddr = pCtx->esi; break;
7521 case 7: u32EffAddr = pCtx->edi; break;
7522 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7523 }
7524 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7525
7526 /* add base */
7527 switch (bSib & X86_SIB_BASE_MASK)
7528 {
7529 case 0: u32EffAddr += pCtx->eax; break;
7530 case 1: u32EffAddr += pCtx->ecx; break;
7531 case 2: u32EffAddr += pCtx->edx; break;
7532 case 3: u32EffAddr += pCtx->ebx; break;
7533 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7534 case 5:
7535 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7536 {
7537 u32EffAddr += pCtx->ebp;
7538 SET_SS_DEF();
7539 }
7540 else
7541 {
7542 uint32_t u32Disp;
7543 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7544 u32EffAddr += u32Disp;
7545 }
7546 break;
7547 case 6: u32EffAddr += pCtx->esi; break;
7548 case 7: u32EffAddr += pCtx->edi; break;
7549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7550 }
7551 break;
7552 }
7553 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7554 case 6: u32EffAddr = pCtx->esi; break;
7555 case 7: u32EffAddr = pCtx->edi; break;
7556 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7557 }
7558
7559 /* Get and add the displacement. */
7560 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7561 {
7562 case 0:
7563 break;
7564 case 1:
7565 {
7566 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7567 u32EffAddr += i8Disp;
7568 break;
7569 }
7570 case 2:
7571 {
7572 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7573 u32EffAddr += u32Disp;
7574 break;
7575 }
7576 default:
7577 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7578 }
7579
7580 }
7581 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7582 *pGCPtrEff = u32EffAddr;
7583 else
7584 {
7585 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7586 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7587 }
7588 }
7589 }
7590 else
7591 {
7592 uint64_t u64EffAddr;
7593
7594 /* Handle the rip+disp32 form with no registers first. */
7595 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7596 {
7597 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7598 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
7599 }
7600 else
7601 {
7602 /* Get the register (or SIB) value. */
7603 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7604 {
7605 case 0: u64EffAddr = pCtx->rax; break;
7606 case 1: u64EffAddr = pCtx->rcx; break;
7607 case 2: u64EffAddr = pCtx->rdx; break;
7608 case 3: u64EffAddr = pCtx->rbx; break;
7609 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7610 case 6: u64EffAddr = pCtx->rsi; break;
7611 case 7: u64EffAddr = pCtx->rdi; break;
7612 case 8: u64EffAddr = pCtx->r8; break;
7613 case 9: u64EffAddr = pCtx->r9; break;
7614 case 10: u64EffAddr = pCtx->r10; break;
7615 case 11: u64EffAddr = pCtx->r11; break;
7616 case 13: u64EffAddr = pCtx->r13; break;
7617 case 14: u64EffAddr = pCtx->r14; break;
7618 case 15: u64EffAddr = pCtx->r15; break;
7619 /* SIB */
7620 case 4:
7621 case 12:
7622 {
7623 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7624
7625 /* Get the index and scale it. */
7626 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7627 {
7628 case 0: u64EffAddr = pCtx->rax; break;
7629 case 1: u64EffAddr = pCtx->rcx; break;
7630 case 2: u64EffAddr = pCtx->rdx; break;
7631 case 3: u64EffAddr = pCtx->rbx; break;
7632 case 4: u64EffAddr = 0; /*none */ break;
7633 case 5: u64EffAddr = pCtx->rbp; break;
7634 case 6: u64EffAddr = pCtx->rsi; break;
7635 case 7: u64EffAddr = pCtx->rdi; break;
7636 case 8: u64EffAddr = pCtx->r8; break;
7637 case 9: u64EffAddr = pCtx->r9; break;
7638 case 10: u64EffAddr = pCtx->r10; break;
7639 case 11: u64EffAddr = pCtx->r11; break;
7640 case 12: u64EffAddr = pCtx->r12; break;
7641 case 13: u64EffAddr = pCtx->r13; break;
7642 case 14: u64EffAddr = pCtx->r14; break;
7643 case 15: u64EffAddr = pCtx->r15; break;
7644 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7645 }
7646 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7647
7648 /* add base */
7649 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7650 {
7651 case 0: u64EffAddr += pCtx->rax; break;
7652 case 1: u64EffAddr += pCtx->rcx; break;
7653 case 2: u64EffAddr += pCtx->rdx; break;
7654 case 3: u64EffAddr += pCtx->rbx; break;
7655 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7656 case 6: u64EffAddr += pCtx->rsi; break;
7657 case 7: u64EffAddr += pCtx->rdi; break;
7658 case 8: u64EffAddr += pCtx->r8; break;
7659 case 9: u64EffAddr += pCtx->r9; break;
7660 case 10: u64EffAddr += pCtx->r10; break;
7661 case 11: u64EffAddr += pCtx->r11; break;
7662 case 12: u64EffAddr += pCtx->r12; break;
7663 case 14: u64EffAddr += pCtx->r14; break;
7664 case 15: u64EffAddr += pCtx->r15; break;
7665 /* complicated encodings */
7666 case 5:
7667 case 13:
7668 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7669 {
7670 if (!pIemCpu->uRexB)
7671 {
7672 u64EffAddr += pCtx->rbp;
7673 SET_SS_DEF();
7674 }
7675 else
7676 u64EffAddr += pCtx->r13;
7677 }
7678 else
7679 {
7680 uint32_t u32Disp;
7681 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7682 u64EffAddr += (int32_t)u32Disp;
7683 }
7684 break;
7685 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7686 }
7687 break;
7688 }
7689 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7690 }
7691
7692 /* Get and add the displacement. */
7693 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7694 {
7695 case 0:
7696 break;
7697 case 1:
7698 {
7699 int8_t i8Disp;
7700 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7701 u64EffAddr += i8Disp;
7702 break;
7703 }
7704 case 2:
7705 {
7706 uint32_t u32Disp;
7707 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7708 u64EffAddr += (int32_t)u32Disp;
7709 break;
7710 }
7711 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
7712 }
7713
7714 }
7715
7716 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
7717 *pGCPtrEff = u64EffAddr;
7718 else
7719 {
7720 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
7721 *pGCPtrEff = u64EffAddr & UINT32_MAX;
7722 }
7723 }
7724
7725 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7726 return VINF_SUCCESS;
7727}
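/* Worked example (illustrative only): in 32-bit mode, bRm=0x44 means mod=01,
 * rm=100b, so a SIB byte and a disp8 follow.  With bSib=0x98 (scale field 2,
 * i.e. x4, index=EBX, base=EAX) and a disp8 of 0x10, the function above yields
 *
 *      GCPtrEff = eax + (ebx << 2) + 0x10
 *
 * and, since neither EBP nor ESP is involved as base, the default segment
 * stays DS (absent a segment prefix).
 */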
7728
7729/** @} */
7730
7731
7732
7733/*
7734 * Include the instructions
7735 */
7736#include "IEMAllInstructions.cpp.h"
7737
7738
7739
7740
7741#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7742
7743/**
7744 * Sets up execution verification mode.
7745 */
7746static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
7747{
7748 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7749 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
7750
7751 /*
7752 * Always note down the address of the current instruction.
7753 */
7754 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
7755 pIemCpu->uOldRip = pOrgCtx->rip;
7756
7757 /*
7758 * Enable verification and/or logging.
7759 */
7760 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7761 if ( pIemCpu->fNoRem
7762 && ( 0
7763#if 0 /* auto enable on first paged protected mode interrupt */
7764 || ( pOrgCtx->eflags.Bits.u1IF
7765 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7766 && TRPMHasTrap(pVCpu)
7767 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
7768#endif
7769#if 0
7770 || ( pOrgCtx->cs.Sel == 0x10
7771 && ( pOrgCtx->rip == 0x90119e3e
7772 || pOrgCtx->rip == 0x901d9810))
7773#endif
7774#if 0 /* Auto enable DSL - FPU stuff. */
7775 || ( pOrgCtx->cs.Sel == 0x10
7776 && (// pOrgCtx->rip == 0xc02ec07f
7777 //|| pOrgCtx->rip == 0xc02ec082
7778 //|| pOrgCtx->rip == 0xc02ec0c9
7779 0
7780 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
7781#endif
7782#if 0 /* Auto enable DSL - fstp st0 stuff. */
7783 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
7784#endif
7785#if 0
7786 || pOrgCtx->rip == 0x9022bb3a
7787#endif
7788#if 0
7789 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
7790#endif
7791#if 0
7792 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
7793 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
7794#endif
7795#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
7796 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
7797 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
7798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
7799#endif
7800#if 0 /* NT4SP1 - xadd early boot. */
7801 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
7802#endif
7803#if 0 /* NT4SP1 - wrmsr (intel MSR). */
7804 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
7805#endif
7806#if 0 /* NT4SP1 - cmpxchg (AMD). */
7807 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
7808#endif
7809#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
7810 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
7811#endif
7812#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
7813 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
7814
7815#endif
7816#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
7817 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
7818
7819#endif
7820#if 0 /* NT4SP1 - frstor [ecx] */
7821 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
7822#endif
7823#if 0 /* xxxxxx - All long mode code. */
7824 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
7825#endif
7826#if 0 /* rep movsq linux 3.7 64-bit boot. */
7827 || (pOrgCtx->rip == 0x0000000000100241)
7828#endif
7829#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
7830 || (pOrgCtx->rip == 0x000000000215e240)
7831#endif
7832 )
7833 )
7834 {
7835 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7836 RTLogFlags(NULL, "enabled");
7837 pIemCpu->fNoRem = false;
7838 }
7839
7840 /*
7841 * Switch state.
7842 */
7843 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7844 {
7845 static CPUMCTX s_DebugCtx; /* Ugly! */
7846
7847 s_DebugCtx = *pOrgCtx;
7848 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7849 }
7850
7851 /*
7852 * See if there is an interrupt pending in TRPM and inject it if we can.
7853 */
7854 pIemCpu->uInjectCpl = UINT8_MAX;
7855 if ( pOrgCtx->eflags.Bits.u1IF
7856 && TRPMHasTrap(pVCpu)
7857 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7858 {
7859 uint8_t u8TrapNo;
7860 TRPMEVENT enmType;
7861 RTGCUINT uErrCode;
7862 RTGCPTR uCr2;
7863 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
7864 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7865 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7866 TRPMResetTrap(pVCpu);
7867 pIemCpu->uInjectCpl = pIemCpu->uCpl;
7868 }
7869
7870 /*
7871 * Reset the counters.
7872 */
7873 pIemCpu->cIOReads = 0;
7874 pIemCpu->cIOWrites = 0;
7875 pIemCpu->fIgnoreRaxRdx = false;
7876 pIemCpu->fOverlappingMovs = false;
7877 pIemCpu->fUndefinedEFlags = 0;
7878
7879 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7880 {
7881 /*
7882 * Free all verification records.
7883 */
7884 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7885 pIemCpu->pIemEvtRecHead = NULL;
7886 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7887 do
7888 {
7889 while (pEvtRec)
7890 {
7891 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7892 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7893 pIemCpu->pFreeEvtRec = pEvtRec;
7894 pEvtRec = pNext;
7895 }
7896 pEvtRec = pIemCpu->pOtherEvtRecHead;
7897 pIemCpu->pOtherEvtRecHead = NULL;
7898 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7899 } while (pEvtRec);
7900 }
7901}
7902
7903
7904/**
7905 * Allocate an event record.
7906 * @returns Pointer to a record.
7907 */
7908static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7909{
7910 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7911 return NULL;
7912
7913 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7914 if (pEvtRec)
7915 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7916 else
7917 {
7918 if (!pIemCpu->ppIemEvtRecNext)
7919 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7920
7921 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7922 if (!pEvtRec)
7923 return NULL;
7924 }
7925 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7926 pEvtRec->pNext = NULL;
7927 return pEvtRec;
7928}
7929
7930
7931/**
7932 * IOMMMIORead notification.
7933 */
7934VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7935{
7936 PVMCPU pVCpu = VMMGetCpu(pVM);
7937 if (!pVCpu)
7938 return;
7939 PIEMCPU pIemCpu = &pVCpu->iem.s;
7940 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7941 if (!pEvtRec)
7942 return;
7943 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7944 pEvtRec->u.RamRead.GCPhys = GCPhys;
7945 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7946 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7947 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7948}
7949
7950
7951/**
7952 * IOMMMIOWrite notification.
7953 */
7954VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7955{
7956 PVMCPU pVCpu = VMMGetCpu(pVM);
7957 if (!pVCpu)
7958 return;
7959 PIEMCPU pIemCpu = &pVCpu->iem.s;
7960 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7961 if (!pEvtRec)
7962 return;
7963 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7964 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7965 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7966 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7967 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7968 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7969 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7970 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7971 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7972}
7973
7974
7975/**
7976 * IOMIOPortRead notification.
7977 */
7978VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7979{
7980 PVMCPU pVCpu = VMMGetCpu(pVM);
7981 if (!pVCpu)
7982 return;
7983 PIEMCPU pIemCpu = &pVCpu->iem.s;
7984 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7985 if (!pEvtRec)
7986 return;
7987 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7988 pEvtRec->u.IOPortRead.Port = Port;
7989 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7990 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7991 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7992}
7993
7994/**
7995 * IOMIOPortWrite notification.
7996 */
7997VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7998{
7999 PVMCPU pVCpu = VMMGetCpu(pVM);
8000 if (!pVCpu)
8001 return;
8002 PIEMCPU pIemCpu = &pVCpu->iem.s;
8003 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8004 if (!pEvtRec)
8005 return;
8006 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8007 pEvtRec->u.IOPortWrite.Port = Port;
8008 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8009 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8010 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8011 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8012}
8013
8014
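/**
 * String I/O port read notification (string I/O is not handled by the
 * verifier yet, hence the assertion).
 */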
8015VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
8016{
8017 AssertFailed();
8018}
8019
8020
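/**
 * String I/O port write notification (string I/O is not handled by the
 * verifier yet, hence the assertion).
 */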
8021VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
8022{
8023 AssertFailed();
8024}
8025
8026
8027/**
8028 * Fakes and records an I/O port read.
8029 *
8030 * @returns VINF_SUCCESS.
8031 * @param pIemCpu The IEM per CPU data.
8032 * @param Port The I/O port.
8033 * @param pu32Value Where to store the fake value.
8034 * @param cbValue The size of the access.
8035 */
8036static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8037{
8038 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8039 if (pEvtRec)
8040 {
8041 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8042 pEvtRec->u.IOPortRead.Port = Port;
8043 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8044 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8045 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8046 }
8047 pIemCpu->cIOReads++;
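    /* The easily recognisable 0xcccccccc pattern below is what the verifier hands
       back for every faked port read; the register compare in
       iemExecVerificationModeCheck skips RAX when exactly one I/O read was
       recorded, as this faked value will normally differ from what REM read. */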
8048 *pu32Value = 0xcccccccc;
8049 return VINF_SUCCESS;
8050}
8051
8052
8053/**
8054 * Fakes and records an I/O port write.
8055 *
8056 * @returns VINF_SUCCESS.
8057 * @param pIemCpu The IEM per CPU data.
8058 * @param Port The I/O port.
8059 * @param u32Value The value being written.
8060 * @param cbValue The size of the access.
8061 */
8062static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8063{
8064 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8065 if (pEvtRec)
8066 {
8067 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8068 pEvtRec->u.IOPortWrite.Port = Port;
8069 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8070 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8071 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8072 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8073 }
8074 pIemCpu->cIOWrites++;
8075 return VINF_SUCCESS;
8076}
8077
8078
8079/**
8080 * Adds extended details (register state and disassembly) to the current assertion.
8081 * @param pIemCpu The IEM per CPU state.
8082 */
8083static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
8084{
8085 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8086 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8087 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8088 char szRegs[4096];
8089 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
8090 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
8091 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
8092 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
8093 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
8094 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
8095 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
8096 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
8097 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
8098 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
8099 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
8100 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
8101 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
8102 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
8103 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
8104 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
8105 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
8106 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
8107 " efer=%016VR{efer}\n"
8108 " pat=%016VR{pat}\n"
8109 " sf_mask=%016VR{sf_mask}\n"
8110 "krnl_gs_base=%016VR{krnl_gs_base}\n"
8111 " lstar=%016VR{lstar}\n"
8112 " star=%016VR{star} cstar=%016VR{cstar}\n"
8113 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
8114 );
8115
8116 char szInstr1[256];
8117 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
8118 DBGF_DISAS_FLAGS_DEFAULT_MODE,
8119 szInstr1, sizeof(szInstr1), NULL);
8120 char szInstr2[256];
8121 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
8122 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8123 szInstr2, sizeof(szInstr2), NULL);
8124
8125 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
8126}
8127
8128
8129/**
8130 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
8131 * dump to the assertion info.
8132 *
8133 * @param pEvtRec The record to dump.
8134 */
8135static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
8136{
8137 switch (pEvtRec->enmEvent)
8138 {
8139 case IEMVERIFYEVENT_IOPORT_READ:
8140 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
8141 pEvtRec->u.IOPortRead.Port,
8142 pEvtRec->u.IOPortRead.cbValue);
8143 break;
8144 case IEMVERIFYEVENT_IOPORT_WRITE:
8145 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
8146 pEvtRec->u.IOPortWrite.Port,
8147 pEvtRec->u.IOPortWrite.cbValue,
8148 pEvtRec->u.IOPortWrite.u32Value);
8149 break;
8150 case IEMVERIFYEVENT_RAM_READ:
8151 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
8152 pEvtRec->u.RamRead.GCPhys,
8153 pEvtRec->u.RamRead.cb);
8154 break;
8155 case IEMVERIFYEVENT_RAM_WRITE:
8156 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
8157 pEvtRec->u.RamWrite.GCPhys,
8158 pEvtRec->u.RamWrite.cb,
8159 (int)pEvtRec->u.RamWrite.cb,
8160 pEvtRec->u.RamWrite.ab);
8161 break;
8162 default:
8163 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
8164 break;
8165 }
8166}
8167
8168
8169/**
8170 * Raises an assertion on the specified records, showing the given message with
8171 * dumps of both records attached.
8172 *
8173 * @param pIemCpu The IEM per CPU data.
8174 * @param pEvtRec1 The first record.
8175 * @param pEvtRec2 The second record.
8176 * @param pszMsg The message explaining why we're asserting.
8177 */
8178static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
8179{
8180 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8181 iemVerifyAssertAddRecordDump(pEvtRec1);
8182 iemVerifyAssertAddRecordDump(pEvtRec2);
8183 iemVerifyAssertMsg2(pIemCpu);
8184 RTAssertPanic();
8185}
8186
8187
8188/**
8189 * Raises an assertion on the specified record, showing the given message with
8190 * a record dump attached.
8191 *
8192 * @param pIemCpu The IEM per CPU data.
8193 * @param pEvtRec The record to dump.
8194 * @param pszMsg The message explaining why we're asserting.
8195 */
8196static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
8197{
8198 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8199 iemVerifyAssertAddRecordDump(pEvtRec);
8200 iemVerifyAssertMsg2(pIemCpu);
8201 RTAssertPanic();
8202}
8203
8204
8205/**
8206 * Verifies a write record.
8207 *
8208 * @param pIemCpu The IEM per CPU data.
8209 * @param pEvtRec The write record.
8210 */
8211static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
8212{
8213 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
8214 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
8215 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
8216 if ( RT_FAILURE(rc)
8217 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
8218 {
8219 /* fend off ins */
8220 if ( !pIemCpu->cIOReads
8221 || pEvtRec->u.RamWrite.ab[0] != 0xcc
8222 || ( pEvtRec->u.RamWrite.cb != 1
8223 && pEvtRec->u.RamWrite.cb != 2
8224 && pEvtRec->u.RamWrite.cb != 4) )
8225 {
8226 /* fend off ROMs */
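                /* (The unsigned subtractions below are range tests: (GCPhys - Base) > Size
                    is false exactly when Base <= GCPhys <= Base + Size, so writes landing in
                    the video BIOS window, the BIOS area and the ROM alias just below 4 GB
                    are not flagged.) */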
8227 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
8228 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
8229 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
8230 {
8231 /* fend off fxsave */
8232 if (pEvtRec->u.RamWrite.cb != 512)
8233 {
8234 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
8235 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
8236 RTAssertMsg2Add("REM: %.*Rhxs\n"
8237 "IEM: %.*Rhxs\n",
8238 pEvtRec->u.RamWrite.cb, abBuf,
8239 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
8240 iemVerifyAssertAddRecordDump(pEvtRec);
8241 iemVerifyAssertMsg2(pIemCpu);
8242 RTAssertPanic();
8243 }
8244 }
8245 }
8246 }
8247
8248}
8249
8250/**
8251 * Performs the post-execution verification checks.
8252 */
8253static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
8254{
8255 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8256 return;
8257
8258 /*
8259 * Switch back the state.
8260 */
8261 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
8262 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
8263 Assert(pOrgCtx != pDebugCtx);
8264 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8265
8266 /*
8267 * Execute the instruction in REM.
8268 */
8269 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8270 EMRemLock(pVM);
8271 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
8272 AssertRC(rc);
8273 EMRemUnlock(pVM);
8274
8275 /*
8276 * Compare the register states.
8277 */
8278 unsigned cDiffs = 0;
8279 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
8280 {
8281 //Log(("REM and IEM ends up with different registers!\n"));
8282
8283# define CHECK_FIELD(a_Field) \
8284 do \
8285 { \
8286 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8287 { \
8288 switch (sizeof(pOrgCtx->a_Field)) \
8289 { \
8290 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8291 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8292 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8293 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
8294 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
8295 } \
8296 cDiffs++; \
8297 } \
8298 } while (0)
8299
8300# define CHECK_BIT_FIELD(a_Field) \
8301 do \
8302 { \
8303 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8304 { \
8305 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8306 cDiffs++; \
8307 } \
8308 } while (0)
8309
8310# define CHECK_SEL(a_Sel) \
8311 do \
8312 { \
8313 CHECK_FIELD(a_Sel.Sel); \
8314 CHECK_FIELD(a_Sel.Attr.u); \
8315 CHECK_FIELD(a_Sel.u64Base); \
8316 CHECK_FIELD(a_Sel.u32Limit); \
8317 CHECK_FIELD(a_Sel.fFlags); \
8318 } while (0)
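/* CHECK_FIELD picks the printf width from sizeof() so 8/16/32/64-bit context
   members are reported at their natural width; CHECK_SEL compares the visible
   selector value as well as the hidden parts (attributes, base, limit, flags). */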
8319
8320#if 1 /* The recompiler doesn't update these the intel way. */
8321 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8322 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8323 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8324 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8325 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8326 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8327 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8328 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8329 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8330 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8331#endif
8332 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8333 {
8334 RTAssertMsg2Weak(" the FPU state differs\n");
8335 cDiffs++;
8336 CHECK_FIELD(fpu.FCW);
8337 CHECK_FIELD(fpu.FSW);
8338 CHECK_FIELD(fpu.FTW);
8339 CHECK_FIELD(fpu.FOP);
8340 CHECK_FIELD(fpu.FPUIP);
8341 CHECK_FIELD(fpu.CS);
8342 CHECK_FIELD(fpu.Rsrvd1);
8343 CHECK_FIELD(fpu.FPUDP);
8344 CHECK_FIELD(fpu.DS);
8345 CHECK_FIELD(fpu.Rsrvd2);
8346 CHECK_FIELD(fpu.MXCSR);
8347 CHECK_FIELD(fpu.MXCSR_MASK);
8348 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8349 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8350 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8351 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8352 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8353 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8354 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8355 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8356 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8357 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8358 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8359 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8360 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8361 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8362 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8363 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8364 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8365 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8366 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8367 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8368 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8369 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8370 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8371 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8372 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8373 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8374 }
8375 CHECK_FIELD(rip);
8376 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8377 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8378 {
8379 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8380 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8381 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8382 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8383 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8384 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8385 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8386 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8387 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8388 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8389 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8390 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8391 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8392 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8393 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8394 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8395 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8396 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8397 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8398 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8399 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8400 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8401 }
8402
8403 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8404 CHECK_FIELD(rax);
8405 CHECK_FIELD(rcx);
8406 if (!pIemCpu->fIgnoreRaxRdx)
8407 CHECK_FIELD(rdx);
8408 CHECK_FIELD(rbx);
8409 CHECK_FIELD(rsp);
8410 CHECK_FIELD(rbp);
8411 CHECK_FIELD(rsi);
8412 CHECK_FIELD(rdi);
8413 CHECK_FIELD(r8);
8414 CHECK_FIELD(r9);
8415 CHECK_FIELD(r10);
8416 CHECK_FIELD(r11);
8417 CHECK_FIELD(r12);
8418 CHECK_FIELD(r13);
8419 CHECK_SEL(cs);
8420 CHECK_SEL(ss);
8421 CHECK_SEL(ds);
8422 CHECK_SEL(es);
8423 CHECK_SEL(fs);
8424 CHECK_SEL(gs);
8425 CHECK_FIELD(cr0);
8426 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
8427 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
8428 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
8429 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
8430 if (pOrgCtx->cr2 != pDebugCtx->cr2)
8431 {
8432 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
8433 { /* ignore */ }
8434 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
8435 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
8436 { /* ignore */ }
8437 else
8438 CHECK_FIELD(cr2);
8439 }
8440 CHECK_FIELD(cr3);
8441 CHECK_FIELD(cr4);
8442 CHECK_FIELD(dr[0]);
8443 CHECK_FIELD(dr[1]);
8444 CHECK_FIELD(dr[2]);
8445 CHECK_FIELD(dr[3]);
8446 CHECK_FIELD(dr[6]);
8447 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
8448 CHECK_FIELD(dr[7]);
8449 CHECK_FIELD(gdtr.cbGdt);
8450 CHECK_FIELD(gdtr.pGdt);
8451 CHECK_FIELD(idtr.cbIdt);
8452 CHECK_FIELD(idtr.pIdt);
8453 CHECK_SEL(ldtr);
8454 CHECK_SEL(tr);
8455 CHECK_FIELD(SysEnter.cs);
8456 CHECK_FIELD(SysEnter.eip);
8457 CHECK_FIELD(SysEnter.esp);
8458 CHECK_FIELD(msrEFER);
8459 CHECK_FIELD(msrSTAR);
8460 CHECK_FIELD(msrPAT);
8461 CHECK_FIELD(msrLSTAR);
8462 CHECK_FIELD(msrCSTAR);
8463 CHECK_FIELD(msrSFMASK);
8464 CHECK_FIELD(msrKERNELGSBASE);
8465
8466 if (cDiffs != 0)
8467 {
8468 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
8469 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8470 iemVerifyAssertMsg2(pIemCpu);
8471 RTAssertPanic();
8472 }
8473# undef CHECK_FIELD
8474# undef CHECK_BIT_FIELD
8475 }
8476
8477 /*
8478 * If the register state compared fine, check the verification event
8479 * records.
8480 */
8481 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8482 {
8483 /*
8484 * Compare verification event records.
8485 * - I/O port accesses should be a 1:1 match.
8486 */
8487 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8488 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8489 while (pIemRec && pOtherRec)
8490 {
8491 /* Since we might miss RAM writes and reads on the other side, skip IEM-only
8492 RAM records here, but still verify any skipped writes against guest memory. */
8493 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8494 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8495 && pIemRec->pNext)
8496 {
8497 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8498 iemVerifyWriteRecord(pIemCpu, pIemRec);
8499 pIemRec = pIemRec->pNext;
8500 }
8501
8502 /* Do the compare. */
8503 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8504 {
8505 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8506 break;
8507 }
8508 bool fEquals;
8509 switch (pIemRec->enmEvent)
8510 {
8511 case IEMVERIFYEVENT_IOPORT_READ:
8512 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8513 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8514 break;
8515 case IEMVERIFYEVENT_IOPORT_WRITE:
8516 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8517 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8518 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8519 break;
8520 case IEMVERIFYEVENT_RAM_READ:
8521 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8522 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8523 break;
8524 case IEMVERIFYEVENT_RAM_WRITE:
8525 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8526 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8527 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8528 break;
8529 default:
8530 fEquals = false;
8531 break;
8532 }
8533 if (!fEquals)
8534 {
8535 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8536 break;
8537 }
8538
8539 /* advance */
8540 pIemRec = pIemRec->pNext;
8541 pOtherRec = pOtherRec->pNext;
8542 }
8543
8544 /* Ignore extra writes and reads. */
8545 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8546 {
8547 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8548 iemVerifyWriteRecord(pIemCpu, pIemRec);
8549 pIemRec = pIemRec->pNext;
8550 }
8551 if (pIemRec != NULL)
8552 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8553 else if (pOtherRec != NULL)
8554 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
8555 }
8556 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8557}
8558
8559#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8560
8561/* stubs */
8562static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8563{
8564 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8565 return VERR_INTERNAL_ERROR;
8566}
8567
8568static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8569{
8570 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8571 return VERR_INTERNAL_ERROR;
8572}
8573
8574#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8575
8576
8577/**
8578 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8579 * IEMExecOneWithPrefetchedByPC.
8580 *
8581 * @return Strict VBox status code.
8582 * @param pVCpu The current virtual CPU.
8583 * @param pIemCpu The IEM per CPU data.
8584 * @param fExecuteInhibit If set, execute the instruction following CLI,
8585 * POP SS and MOV SS,GR.
8586 */
8587DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
8588{
8589 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8590 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8591 if (rcStrict == VINF_SUCCESS)
8592 pIemCpu->cInstructions++;
8593//#ifdef DEBUG
8594// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8595//#endif
8596
8597 /* Execute the next instruction as well if a cli, pop ss or
8598 mov ss, Gr has just completed successfully. */
8599 if ( fExecuteInhibit
8600 && rcStrict == VINF_SUCCESS
8601 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8602 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8603 {
8604 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
8605 if (rcStrict == VINF_SUCCESS)
8606 {
8607 IEM_OPCODE_GET_NEXT_U8(&b);
8608 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8609 if (rcStrict == VINF_SUCCESS)
8610 pIemCpu->cInstructions++;
8611 }
8612 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8613 }
8614
8615 /*
8616 * Return value fiddling and statistics.
8617 */
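    /* When the instruction itself returned an informational VINF_EM status, any
       status recorded in rcPassUp takes over if it lies outside the VINF_EM
       range or has a lower (more important) value than the instruction status. */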
8618 if (rcStrict != VINF_SUCCESS)
8619 {
8620 if (RT_SUCCESS(rcStrict))
8621 {
8622 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8623 int32_t const rcPassUp = pIemCpu->rcPassUp;
8624 if (rcPassUp == VINF_SUCCESS)
8625 pIemCpu->cRetInfStatuses++;
8626 else if ( rcPassUp < VINF_EM_FIRST
8627 || rcPassUp > VINF_EM_LAST
8628 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8629 {
8630 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8631 pIemCpu->cRetPassUpStatus++;
8632 rcStrict = rcPassUp;
8633 }
8634 else
8635 {
8636 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8637 pIemCpu->cRetInfStatuses++;
8638 }
8639 }
8640 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8641 pIemCpu->cRetAspectNotImplemented++;
8642 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8643 pIemCpu->cRetInstrNotImplemented++;
8644#ifdef IEM_VERIFICATION_MODE_FULL
8645 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8646 rcStrict = VINF_SUCCESS;
8647#endif
8648 else
8649 pIemCpu->cRetErrStatuses++;
8650 }
8651 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8652 {
8653 pIemCpu->cRetPassUpStatus++;
8654 rcStrict = pIemCpu->rcPassUp;
8655 }
8656
8657 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8658 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8659#if defined(IEM_VERIFICATION_MODE_FULL)
8660 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8661 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8662 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8663 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8664#endif
8665 return rcStrict;
8666}
8667
8668
8669#ifdef IN_RC
8670/**
8671 * Re-enters raw-mode or ensures we return to ring-3.
8672 *
8673 * @returns rcStrict, maybe modified.
8674 * @param pIemCpu The IEM CPU structure.
8675 * @param pVCpu The cross context virtual CPU structure of the caller.
8676 * @param pCtx The current CPU context.
8677 * @param rcStrict The status code returned by the interpreter.
8678 */
8679DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
8680{
8681 if (!pIemCpu->fInPatchCode)
8682 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
8683 return rcStrict;
8684}
8685#endif
8686
8687
8688/**
8689 * Execute one instruction.
8690 *
8691 * @return Strict VBox status code.
8692 * @param pVCpu The current virtual CPU.
8693 */
8694VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
8695{
8696 PIEMCPU pIemCpu = &pVCpu->iem.s;
8697
8698#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8699 iemExecVerificationModeSetup(pIemCpu);
8700#endif
8701#ifdef LOG_ENABLED
8702 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8703# ifdef IN_RING3
8704 if (LogIs2Enabled())
8705 {
8706 char szInstr[256];
8707 uint32_t cbInstr = 0;
8708 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8709 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8710 szInstr, sizeof(szInstr), &cbInstr);
8711
8712 Log2(("**** "
8713 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8714 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8715 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8716 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8717 " %s\n"
8718 ,
8719 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8720 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8721 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8722 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8723 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8724 szInstr));
8725
8726 if (LogIs3Enabled())
8727 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
8728 }
8729 else
8730# endif
8731 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8732 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8733#endif
8734
8735 /*
8736 * Do the decoding and emulation.
8737 */
8738 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8739 if (rcStrict == VINF_SUCCESS)
8740 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8741
8742#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8743 /*
8744 * Assert some sanity.
8745 */
8746 iemExecVerificationModeCheck(pIemCpu);
8747#endif
8748#ifdef IN_RC
8749 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
8750#endif
8751 if (rcStrict != VINF_SUCCESS)
8752 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8753 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8754 return rcStrict;
8755}
8756
8757
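/**
 * Executes one instruction, additionally returning how many bytes of guest
 * memory the instruction wrote.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must match the internal context.
 * @param   pcbWritten  Where to return the number of bytes written. Optional.
 */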
8758VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8759{
8760 PIEMCPU pIemCpu = &pVCpu->iem.s;
8761 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8762 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8763
8764 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8765 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8766 if (rcStrict == VINF_SUCCESS)
8767 {
8768 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8769 if (pcbWritten)
8770 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8771 }
8772
8773#ifdef IN_RC
8774 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8775#endif
8776 return rcStrict;
8777}
8778
8779
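/**
 * Executes one instruction, optionally using opcode bytes the caller has
 * already fetched.
 *
 * If the supplied bytes are for the current RIP they are copied into the
 * decoder directly and the guest-memory prefetch is skipped; otherwise the
 * normal prefetch path is taken.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must match the internal context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched at.
 * @param   pvOpcodeBytes   The opcode bytes.
 * @param   cbOpcodeBytes   Number of opcode bytes available.
 */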
8780VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8781 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8782{
8783 PIEMCPU pIemCpu = &pVCpu->iem.s;
8784 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8785 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8786
8787 VBOXSTRICTRC rcStrict;
8788 if ( cbOpcodeBytes
8789 && pCtx->rip == OpcodeBytesPC)
8790 {
8791 iemInitDecoder(pIemCpu, false);
8792 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8793 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8794 rcStrict = VINF_SUCCESS;
8795 }
8796 else
8797 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8798 if (rcStrict == VINF_SUCCESS)
8799 {
8800 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8801 }
8802
8803#ifdef IN_RC
8804 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8805#endif
8806 return rcStrict;
8807}
8808
8809
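/**
 * Like IEMExecOneEx, but initializes the decoder in bypass-handlers mode and
 * does not execute the instruction shadowed by an interrupt inhibit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must match the internal context.
 * @param   pcbWritten  Where to return the number of bytes written. Optional.
 */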
8810VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8811{
8812 PIEMCPU pIemCpu = &pVCpu->iem.s;
8813 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8814 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8815
8816 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8817 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8818 if (rcStrict == VINF_SUCCESS)
8819 {
8820 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8821 if (pcbWritten)
8822 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8823 }
8824
8825#ifdef IN_RC
8826 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8827#endif
8828 return rcStrict;
8829}
8830
8831
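/**
 * Combines IEMExecOneBypassEx and IEMExecOneWithPrefetchedByPC: bypass-handlers
 * mode, no inhibit follow-up, and caller-supplied opcode bytes when they match
 * the current RIP.
 */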
8832VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8833 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8834{
8835 PIEMCPU pIemCpu = &pVCpu->iem.s;
8836 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8837 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8838
8839 VBOXSTRICTRC rcStrict;
8840 if ( cbOpcodeBytes
8841 && pCtx->rip == OpcodeBytesPC)
8842 {
8843 iemInitDecoder(pIemCpu, true);
8844 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8845 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8846 rcStrict = VINF_SUCCESS;
8847 }
8848 else
8849 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8850 if (rcStrict == VINF_SUCCESS)
8851 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8852
8853#ifdef IN_RC
8854 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
8855#endif
8856 return rcStrict;
8857}
8858
8859
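/**
 * Executes guest code: injects a pending TRPM event first (when interrupts are
 * enabled and not inhibited at the current RIP), then decodes and executes one
 * instruction, including any inhibit-shadowed follow-up instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The current virtual CPU.
 */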
8860VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
8861{
8862 PIEMCPU pIemCpu = &pVCpu->iem.s;
8863 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8864
8865 /*
8866 * See if there is an interrupt pending in TRPM and inject it if we can.
8867 */
8868#ifdef IEM_VERIFICATION_MODE_FULL
8869 pIemCpu->uInjectCpl = UINT8_MAX;
8870#endif
8871 if ( pCtx->eflags.Bits.u1IF
8872 && TRPMHasTrap(pVCpu)
8873 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
8874 {
8875 uint8_t u8TrapNo;
8876 TRPMEVENT enmType;
8877 RTGCUINT uErrCode;
8878 RTGCPTR uCr2;
8879 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
8880 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
8881 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8882 TRPMResetTrap(pVCpu);
8883 }
8884
8885 /*
8886 * Log the state.
8887 */
8888#ifdef LOG_ENABLED
8889# ifdef IN_RING3
8890 if (LogIs2Enabled())
8891 {
8892 char szInstr[256];
8893 uint32_t cbInstr = 0;
8894 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8895 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8896 szInstr, sizeof(szInstr), &cbInstr);
8897
8898 Log2(("**** "
8899 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8900 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8901 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8902 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8903 " %s\n"
8904 ,
8905 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8906 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8907 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8908 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8909 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8910 szInstr));
8911
8912 if (LogIs3Enabled())
8913 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
8914 }
8915 else
8916# endif
8917 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8918 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8919#endif
8920
8921 /*
8922 * Do the decoding and emulation.
8923 */
8924 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8925 if (rcStrict == VINF_SUCCESS)
8926 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8927
8928 /*
8929 * Maybe re-enter raw-mode and log.
8930 */
8931#ifdef IN_RC
8932 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
8933#endif
8934 if (rcStrict != VINF_SUCCESS)
8935 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8936 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8937 return rcStrict;
8938}
8939
8940
8941
8942/**
8943 * Injects a trap, fault, abort, software interrupt or external interrupt.
8944 *
8945 * The parameter list matches TRPMQueryTrapAll pretty closely.
8946 *
8947 * @returns Strict VBox status code.
8948 * @param pVCpu The current virtual CPU.
8949 * @param u8TrapNo The trap number.
8950 * @param enmType What type is it (trap/fault/abort), software
8951 * interrupt or hardware interrupt.
8952 * @param uErrCode The error code if applicable.
8953 * @param uCr2 The CR2 value if applicable.
8954 */
8955VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
8956{
8957 iemInitDecoder(&pVCpu->iem.s, false);
8958
8959 uint32_t fFlags;
8960 switch (enmType)
8961 {
8962 case TRPM_HARDWARE_INT:
8963 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
8964 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
8965 uErrCode = uCr2 = 0;
8966 break;
8967
8968 case TRPM_SOFTWARE_INT:
8969 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
8970 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
8971 uErrCode = uCr2 = 0;
8972 break;
8973
8974 case TRPM_TRAP:
8975 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
8976 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
8977 if (u8TrapNo == X86_XCPT_PF)
8978 fFlags |= IEM_XCPT_FLAGS_CR2;
8979 switch (u8TrapNo)
8980 {
8981 case X86_XCPT_DF:
8982 case X86_XCPT_TS:
8983 case X86_XCPT_NP:
8984 case X86_XCPT_SS:
8985 case X86_XCPT_PF:
8986 case X86_XCPT_AC:
8987 fFlags |= IEM_XCPT_FLAGS_ERR;
8988 break;
8989 }
8990 break;
8991
8992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8993 }
8994
8995 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
8996}
8997
8998
8999VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9000{
9001 return VERR_NOT_IMPLEMENTED;
9002}
9003
9004
9005VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9006{
9007 return VERR_NOT_IMPLEMENTED;
9008}
9009
9010
9011#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
9012/**
9013 * Executes an IRET instruction with default operand size.
9014 *
9015 * This is for PATM.
9016 *
9017 * @returns VBox status code.
9018 * @param pVCpu The current virtual CPU.
9019 * @param pCtxCore The register frame.
9020 */
9021VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
9022{
9023 PIEMCPU pIemCpu = &pVCpu->iem.s;
9024 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9025
9026 iemCtxCoreToCtx(pCtx, pCtxCore);
9027 iemInitDecoder(pIemCpu);
9028 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
9029 if (rcStrict == VINF_SUCCESS)
9030 iemCtxToCtxCore(pCtxCore, pCtx);
9031 else
9032 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9033 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9034 return rcStrict;
9035}
9036#endif
9037