VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@43030

Last change on this file since 43030 was 42778, checked in by vboxsync, 12 years ago

IEM: CR4 and CR3 fixes. Debugging hacks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 313.5 KB
1/* $Id: IEMAll.cpp 42778 2012-08-11 22:47:03Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the "IEM" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
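/* Illustrative sketch of how the logging levels listed above are typically
 * emitted once LOG_GROUP is set to LOG_GROUP_IEM; the event text and values
 * here are made-up examples, not actual IEM log statements. */
#if 0
LogFlow(("IEMExecOne: enter\n"));                          /* Flow:  enter/exit info.     */
Log(("IEM: raising #GP(0) while decoding\n"));             /* Lvl 1: major events.        */
Log3(("IEM: cs:rip=%04x:%08RX64\n", 0x0008, UINT64_C(0))); /* Lvl 3: detailed state info. */
Log4(("decode: mov Gv,Ev\n"));                             /* Lvl 4: decoded mnemonics.   */
Log5(("ModRM=%#x\n", 0xc0));                               /* Lvl 5: decoding details.    */
#endif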
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pgm.h>
86#include <internal/pgm.h>
87#include <VBox/vmm/iom.h>
88#include <VBox/vmm/em.h>
89#include <VBox/vmm/tm.h>
90#include <VBox/vmm/dbgf.h>
91#ifdef VBOX_WITH_RAW_MODE_NOT_R0
92# include <VBox/vmm/patm.h>
93#endif
94#include "IEMInternal.h"
95#ifdef IEM_VERIFICATION_MODE_FULL
96# include <VBox/vmm/rem.h>
97# include <VBox/vmm/mm.h>
98#endif
99#include <VBox/vmm/vm.h>
100#include <VBox/log.h>
101#include <VBox/err.h>
102#include <VBox/param.h>
103#include <iprt/assert.h>
104#include <iprt/string.h>
105#include <iprt/x86.h>
106
107
108/*******************************************************************************
109* Structures and Typedefs *
110*******************************************************************************/
111/** @typedef PFNIEMOP
112 * Pointer to an opcode decoder function.
113 */
114
115/** @def FNIEMOP_DEF
116 * Define an opcode decoder function.
117 *
118 * We're using macros for this so that adding and removing parameters as well as
119 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
120 *
121 * @param a_Name The function name.
122 */
123
124
125#if defined(__GNUC__) && defined(RT_ARCH_X86)
126typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
127# define FNIEMOP_DEF(a_Name) \
128 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
129# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
130 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
131# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
132 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
133
134#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
135typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
136# define FNIEMOP_DEF(a_Name) \
137 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
138# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
139 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
140# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
141 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
142
143#elif defined(__GNUC__)
144typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
145# define FNIEMOP_DEF(a_Name) \
146 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
147# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
148 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
149# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
150 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
151
152#else
153typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
154# define FNIEMOP_DEF(a_Name) \
155 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
156# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
157 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
158# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
159 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
160
161#endif
162
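/* Illustrative sketch of how the FNIEMOP_DEF* and FNIEMOP_CALL* macros fit
 * together: FNIEMOP_DEF declares a decoder function with the calling
 * convention and attributes selected above, and FNIEMOP_CALL invokes it with
 * the implicit pIemCpu argument.  The opcode names are made up. */
#if 0
FNIEMOP_DEF(iemOp_example_nop)
{
    /* Decode nothing, do nothing; a real decoder would finish by advancing
       RIP through the instruction-completion helpers. */
    return VINF_SUCCESS;
}

FNIEMOP_DEF_1(iemOp_example_wrapper, uint8_t, bRm)
{
    NOREF(bRm);
    return FNIEMOP_CALL(iemOp_example_nop);
}
#endif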
163
164/**
165 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
166 */
167typedef union IEMSELDESC
168{
169 /** The legacy view. */
170 X86DESC Legacy;
171 /** The long mode view. */
172 X86DESC64 Long;
173} IEMSELDESC;
174/** Pointer to a selector descriptor table entry. */
175typedef IEMSELDESC *PIEMSELDESC;
176
177
178/*******************************************************************************
179* Defined Constants And Macros *
180*******************************************************************************/
181/** @name IEM status codes.
182 *
183 * Not quite sure how this will play out in the end, just aliasing safe status
184 * codes for now.
185 *
186 * @{ */
187#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
188/** @} */
189
190/** Temporary hack to disable the double execution. Will be removed in favor
191 * of a dedicated execution mode in EM. */
192//#define IEM_VERIFICATION_MODE_NO_REM
193
194/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
195 * due to GCC lacking knowledge about the value range of a switch. */
196#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
197
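/* Usage sketch for the macro above; the switch and the cbValue variable are
 * made-up examples.  The macro expands to the entire 'default:' case, so the
 * compiler sees every path either assign cbValue or return. */
#if 0
uint8_t cbValue;
switch (pIemCpu->enmEffOpSize)
{
    case IEMMODE_16BIT: cbValue = 2; break;
    case IEMMODE_32BIT: cbValue = 4; break;
    case IEMMODE_64BIT: cbValue = 8; break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
#endif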
198/**
199 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
200 * occasion.
201 */
202#ifdef LOG_ENABLED
203# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
204 do { \
205 Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
206 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
207 } while (0)
208#else
209# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
210 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
211#endif
212
213/**
214 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
215 * occasion using the supplied logger statement.
216 *
217 * @param a_LoggerArgs What to log on failure.
218 */
219#ifdef LOG_ENABLED
220# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
221 do { \
222 LogFunc(a_LoggerArgs); \
223 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
224 } while (0)
225#else
226# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
227 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
228#endif
229
230/**
231 * Call an opcode decoder function.
232 *
233 * We're using macros for this so that adding and removing parameters can be
234 * done as we please. See FNIEMOP_DEF.
235 */
236#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
237
238/**
239 * Call a common opcode decoder function taking one extra argument.
240 *
241 * We're using macros for this so that adding and removing parameters can be
242 * done as we please. See FNIEMOP_DEF_1.
243 */
244#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
245
246/**
247 * Call a common opcode decoder function taking two extra arguments.
248 *
249 * We're using macros for this so that adding and removing parameters can be
250 * done as we please. See FNIEMOP_DEF_2.
251 */
252#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
253
254/**
255 * Check if we're currently executing in real or virtual 8086 mode.
256 *
257 * @returns @c true if it is, @c false if not.
258 * @param a_pIemCpu The IEM state of the current CPU.
259 */
260#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
261
262/**
263 * Check if we're currently executing in long mode.
264 *
265 * @returns @c true if it is, @c false if not.
266 * @param a_pIemCpu The IEM state of the current CPU.
267 */
268#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
269
270/**
271 * Check if we're currently executing in real mode.
272 *
273 * @returns @c true if it is, @c false if not.
274 * @param a_pIemCpu The IEM state of the current CPU.
275 */
276#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
277
278/**
279 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
280 */
281#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
282
283/**
284 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
285 */
286#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
287
288/**
289 * Tests if at least one of the specified AMD CPUID features (extended) is
290 * marked present.
291 */
292#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
293
294/**
295 * Checks if an Intel CPUID feature is present.
296 */
297#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
298 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
299 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
300
301/**
302 * Evaluates to true if we're presenting an Intel CPU to the guest.
303 */
304#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determine this once and store it in the CPU structure */
305
306/**
307 * Evaluates to true if we're presenting an AMD CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determine this once and store it in the CPU structure */
310
311/**
312 * Check if the address is canonical.
313 */
314#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
315
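/* Worked example for the check above: adding 2^47 maps every canonical
 * address (bits 63:47 all identical) into [0, 2^48) and everything else
 * outside it, e.g.:
 *   0x00007fffffffffff + 0x800000000000 = 0x0000ffffffffffff  -> canonical
 *   0xffff800000000000 + 0x800000000000 = 0x0000000000000000  -> canonical
 *   0x0000800000000000 + 0x800000000000 = 0x0001000000000000  -> not canonical
 */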
316
317/*******************************************************************************
318* Global Variables *
319*******************************************************************************/
320extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
321
322
323/** Function table for the ADD instruction. */
324static const IEMOPBINSIZES g_iemAImpl_add =
325{
326 iemAImpl_add_u8, iemAImpl_add_u8_locked,
327 iemAImpl_add_u16, iemAImpl_add_u16_locked,
328 iemAImpl_add_u32, iemAImpl_add_u32_locked,
329 iemAImpl_add_u64, iemAImpl_add_u64_locked
330};
331
332/** Function table for the ADC instruction. */
333static const IEMOPBINSIZES g_iemAImpl_adc =
334{
335 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
336 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
337 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
338 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
339};
340
341/** Function table for the SUB instruction. */
342static const IEMOPBINSIZES g_iemAImpl_sub =
343{
344 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
345 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
346 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
347 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
348};
349
350/** Function table for the SBB instruction. */
351static const IEMOPBINSIZES g_iemAImpl_sbb =
352{
353 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
354 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
355 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
356 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
357};
358
359/** Function table for the OR instruction. */
360static const IEMOPBINSIZES g_iemAImpl_or =
361{
362 iemAImpl_or_u8, iemAImpl_or_u8_locked,
363 iemAImpl_or_u16, iemAImpl_or_u16_locked,
364 iemAImpl_or_u32, iemAImpl_or_u32_locked,
365 iemAImpl_or_u64, iemAImpl_or_u64_locked
366};
367
368/** Function table for the XOR instruction. */
369static const IEMOPBINSIZES g_iemAImpl_xor =
370{
371 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
372 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
373 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
374 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
375};
376
377/** Function table for the AND instruction. */
378static const IEMOPBINSIZES g_iemAImpl_and =
379{
380 iemAImpl_and_u8, iemAImpl_and_u8_locked,
381 iemAImpl_and_u16, iemAImpl_and_u16_locked,
382 iemAImpl_and_u32, iemAImpl_and_u32_locked,
383 iemAImpl_and_u64, iemAImpl_and_u64_locked
384};
385
386/** Function table for the CMP instruction.
387 * @remarks Making operand order ASSUMPTIONS.
388 */
389static const IEMOPBINSIZES g_iemAImpl_cmp =
390{
391 iemAImpl_cmp_u8, NULL,
392 iemAImpl_cmp_u16, NULL,
393 iemAImpl_cmp_u32, NULL,
394 iemAImpl_cmp_u64, NULL
395};
396
397/** Function table for the TEST instruction.
398 * @remarks Making operand order ASSUMPTIONS.
399 */
400static const IEMOPBINSIZES g_iemAImpl_test =
401{
402 iemAImpl_test_u8, NULL,
403 iemAImpl_test_u16, NULL,
404 iemAImpl_test_u32, NULL,
405 iemAImpl_test_u64, NULL
406};
407
408/** Function table for the BT instruction. */
409static const IEMOPBINSIZES g_iemAImpl_bt =
410{
411 NULL, NULL,
412 iemAImpl_bt_u16, NULL,
413 iemAImpl_bt_u32, NULL,
414 iemAImpl_bt_u64, NULL
415};
416
417/** Function table for the BTC instruction. */
418static const IEMOPBINSIZES g_iemAImpl_btc =
419{
420 NULL, NULL,
421 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
422 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
423 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
424};
425
426/** Function table for the BTR instruction. */
427static const IEMOPBINSIZES g_iemAImpl_btr =
428{
429 NULL, NULL,
430 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
431 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
432 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
433};
434
435/** Function table for the BTS instruction. */
436static const IEMOPBINSIZES g_iemAImpl_bts =
437{
438 NULL, NULL,
439 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
440 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
441 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
442};
443
444/** Function table for the BSF instruction. */
445static const IEMOPBINSIZES g_iemAImpl_bsf =
446{
447 NULL, NULL,
448 iemAImpl_bsf_u16, NULL,
449 iemAImpl_bsf_u32, NULL,
450 iemAImpl_bsf_u64, NULL
451};
452
453/** Function table for the BSR instruction. */
454static const IEMOPBINSIZES g_iemAImpl_bsr =
455{
456 NULL, NULL,
457 iemAImpl_bsr_u16, NULL,
458 iemAImpl_bsr_u32, NULL,
459 iemAImpl_bsr_u64, NULL
460};
461
462/** Function table for the IMUL instruction. */
463static const IEMOPBINSIZES g_iemAImpl_imul_two =
464{
465 NULL, NULL,
466 iemAImpl_imul_two_u16, NULL,
467 iemAImpl_imul_two_u32, NULL,
468 iemAImpl_imul_two_u64, NULL
469};
470
471/** Group 1 /r lookup table. */
472static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
473{
474 &g_iemAImpl_add,
475 &g_iemAImpl_or,
476 &g_iemAImpl_adc,
477 &g_iemAImpl_sbb,
478 &g_iemAImpl_and,
479 &g_iemAImpl_sub,
480 &g_iemAImpl_xor,
481 &g_iemAImpl_cmp
482};
483
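/* Illustrative sketch of how the group 1 table above is meant to be indexed:
 * the reg field of the ModR/M byte selects the operation (opcodes 0x80-0x83,
 * /0=ADD, /1=OR, ..., /7=CMP).  The ModR/M value is a made-up example. */
#if 0
uint8_t const   bRm   = 0xc1; /* mod=11b, reg=000b (ADD), rm=001b (rCX) */
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif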
484/** Function table for the INC instruction. */
485static const IEMOPUNARYSIZES g_iemAImpl_inc =
486{
487 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
488 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
489 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
490 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
491};
492
493/** Function table for the DEC instruction. */
494static const IEMOPUNARYSIZES g_iemAImpl_dec =
495{
496 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
497 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
498 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
499 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
500};
501
502/** Function table for the NEG instruction. */
503static const IEMOPUNARYSIZES g_iemAImpl_neg =
504{
505 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
506 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
507 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
508 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
509};
510
511/** Function table for the NOT instruction. */
512static const IEMOPUNARYSIZES g_iemAImpl_not =
513{
514 iemAImpl_not_u8, iemAImpl_not_u8_locked,
515 iemAImpl_not_u16, iemAImpl_not_u16_locked,
516 iemAImpl_not_u32, iemAImpl_not_u32_locked,
517 iemAImpl_not_u64, iemAImpl_not_u64_locked
518};
519
520
521/** Function table for the ROL instruction. */
522static const IEMOPSHIFTSIZES g_iemAImpl_rol =
523{
524 iemAImpl_rol_u8,
525 iemAImpl_rol_u16,
526 iemAImpl_rol_u32,
527 iemAImpl_rol_u64
528};
529
530/** Function table for the ROR instruction. */
531static const IEMOPSHIFTSIZES g_iemAImpl_ror =
532{
533 iemAImpl_ror_u8,
534 iemAImpl_ror_u16,
535 iemAImpl_ror_u32,
536 iemAImpl_ror_u64
537};
538
539/** Function table for the RCL instruction. */
540static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
541{
542 iemAImpl_rcl_u8,
543 iemAImpl_rcl_u16,
544 iemAImpl_rcl_u32,
545 iemAImpl_rcl_u64
546};
547
548/** Function table for the RCR instruction. */
549static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
550{
551 iemAImpl_rcr_u8,
552 iemAImpl_rcr_u16,
553 iemAImpl_rcr_u32,
554 iemAImpl_rcr_u64
555};
556
557/** Function table for the SHL instruction. */
558static const IEMOPSHIFTSIZES g_iemAImpl_shl =
559{
560 iemAImpl_shl_u8,
561 iemAImpl_shl_u16,
562 iemAImpl_shl_u32,
563 iemAImpl_shl_u64
564};
565
566/** Function table for the SHR instruction. */
567static const IEMOPSHIFTSIZES g_iemAImpl_shr =
568{
569 iemAImpl_shr_u8,
570 iemAImpl_shr_u16,
571 iemAImpl_shr_u32,
572 iemAImpl_shr_u64
573};
574
575/** Function table for the SAR instruction. */
576static const IEMOPSHIFTSIZES g_iemAImpl_sar =
577{
578 iemAImpl_sar_u8,
579 iemAImpl_sar_u16,
580 iemAImpl_sar_u32,
581 iemAImpl_sar_u64
582};
583
584
585/** Function table for the MUL instruction. */
586static const IEMOPMULDIVSIZES g_iemAImpl_mul =
587{
588 iemAImpl_mul_u8,
589 iemAImpl_mul_u16,
590 iemAImpl_mul_u32,
591 iemAImpl_mul_u64
592};
593
594/** Function table for the IMUL instruction working implicitly on rAX. */
595static const IEMOPMULDIVSIZES g_iemAImpl_imul =
596{
597 iemAImpl_imul_u8,
598 iemAImpl_imul_u16,
599 iemAImpl_imul_u32,
600 iemAImpl_imul_u64
601};
602
603/** Function table for the DIV instruction. */
604static const IEMOPMULDIVSIZES g_iemAImpl_div =
605{
606 iemAImpl_div_u8,
607 iemAImpl_div_u16,
608 iemAImpl_div_u32,
609 iemAImpl_div_u64
610};
611
612/** Function table for the IDIV instruction. */
613static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
614{
615 iemAImpl_idiv_u8,
616 iemAImpl_idiv_u16,
617 iemAImpl_idiv_u32,
618 iemAImpl_idiv_u64
619};
620
621/** Function table for the SHLD instruction. */
622static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
623{
624 iemAImpl_shld_u16,
625 iemAImpl_shld_u32,
626 iemAImpl_shld_u64,
627};
628
629/** Function table for the SHRD instruction. */
630static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
631{
632 iemAImpl_shrd_u16,
633 iemAImpl_shrd_u32,
634 iemAImpl_shrd_u64,
635};
636
637
638#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
639/** What IEM just wrote. */
640uint8_t g_abIemWrote[256];
641/** How much IEM just wrote. */
642size_t g_cbIemWrote;
643#endif
644
645
646/*******************************************************************************
647* Internal Functions *
648*******************************************************************************/
649static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
650/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
651static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
652static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
653static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
654static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
655static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
656static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
657static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
658static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
659static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
660static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
661static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
662static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
663static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
664static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
665static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
666static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
667static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
668static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
669static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
670static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
671static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
672static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
673
674#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
675static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
676#endif
677static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
678static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
679
680
681/**
682 * Sets the pass up status.
683 *
684 * @returns VINF_SUCCESS.
685 * @param pIemCpu The per CPU IEM state of the calling thread.
686 * @param rcPassUp The pass up status. Must be informational.
687 * VINF_SUCCESS is not allowed.
688 */
689static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
690{
691 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
692
693 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
694 if (rcOldPassUp == VINF_SUCCESS)
695 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
696 /* If both are EM scheduling code, use EM priority rules. */
697 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
698 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
699 {
700 if (rcPassUp < rcOldPassUp)
701 {
702 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
703 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
704 }
705 else
706 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
707 }
708 /* Override EM scheduling with specific status code. */
709 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
710 {
711 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
712 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
713 }
714 /* Don't override specific status code, first come first served. */
715 else
716 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
717 return VINF_SUCCESS;
718}
719
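/* Illustrative sketch of how the function above is meant to be used: a worker
 * that got an informational status it cannot return directly records it here
 * and carries on, so the recorded code can later be folded into the status
 * IEM finally returns.  The status value is a made-up example. */
#if 0
VBOXSTRICTRC rcStrict = VINF_EM_RESCHEDULE;            /* e.g. from an access handler */
if (rcStrict != VINF_SUCCESS)
    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);  /* returns VINF_SUCCESS, stashes
                                                          the code in rcPassUp */
#endif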
720
721/**
722 * Initializes the decoder state.
723 *
724 * @param pIemCpu The per CPU IEM state.
725 * @param fBypassHandlers Whether to bypass access handlers.
726 */
727DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
728{
729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
730 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
731
732#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
733 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
734 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
735 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
736 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
737 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
738 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
739 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
740 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
741#endif
742
743#ifdef VBOX_WITH_RAW_MODE_NOT_R0
744 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
745#endif
746 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
747#ifdef IEM_VERIFICATION_MODE_FULL
748 if (pIemCpu->uInjectCpl != UINT8_MAX)
749 pIemCpu->uCpl = pIemCpu->uInjectCpl;
750#endif
751 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
752 ? IEMMODE_64BIT
753 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
754 ? IEMMODE_32BIT
755 : IEMMODE_16BIT;
756 pIemCpu->enmCpuMode = enmMode;
757 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
758 pIemCpu->enmEffAddrMode = enmMode;
759 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
760 pIemCpu->enmEffOpSize = enmMode;
761 pIemCpu->fPrefixes = 0;
762 pIemCpu->uRexReg = 0;
763 pIemCpu->uRexB = 0;
764 pIemCpu->uRexIndex = 0;
765 pIemCpu->iEffSeg = X86_SREG_DS;
766 pIemCpu->offOpcode = 0;
767 pIemCpu->cbOpcode = 0;
768 pIemCpu->cActiveMappings = 0;
769 pIemCpu->iNextMapping = 0;
770 pIemCpu->rcPassUp = VINF_SUCCESS;
771 pIemCpu->fBypassHandlers = fBypassHandlers;
772
773}
774
775
776/**
777 * Prefetches the opcode bytes for the first time, i.e. when starting execution.
778 *
779 * @returns Strict VBox status code.
780 * @param pIemCpu The IEM state.
781 * @param fBypassHandlers Whether to bypass access handlers.
782 */
783static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
784{
785#ifdef IEM_VERIFICATION_MODE_FULL
786 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
787#endif
788 iemInitDecoder(pIemCpu, fBypassHandlers);
789
790 /*
791 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
792 *
793 * First translate CS:rIP to a physical address.
794 */
795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
796 uint32_t cbToTryRead;
797 RTGCPTR GCPtrPC;
798 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
799 {
800 cbToTryRead = PAGE_SIZE;
801 GCPtrPC = pCtx->rip;
802 if (!IEM_IS_CANONICAL(GCPtrPC))
803 return iemRaiseGeneralProtectionFault0(pIemCpu);
804 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
805 }
806 else
807 {
808 uint32_t GCPtrPC32 = pCtx->eip;
809 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
810 if (GCPtrPC32 > pCtx->cs.u32Limit)
811 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
812 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
813 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
814 }
815
816#if defined(IN_RC) && defined(VBOX_WITH_RAW_MODE)
817 /* Allow interpretation of patch manager code blocks since they can for
818 instance throw #PFs for perfectly good reasons. */
819 if ( (pCtx->cs.Sel & X86_SEL_RPL) == 1
820 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), GCPtrPC))
821 {
822 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
823 if (cbToTryRead > cbLeftOnPage)
824 cbToTryRead = cbLeftOnPage;
825 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
826 cbToTryRead = sizeof(pIemCpu->abOpcode);
827 memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
828 pIemCpu->cbOpcode = cbToTryRead;
829 return VINF_SUCCESS;
830 }
831#endif
832
833 RTGCPHYS GCPhys;
834 uint64_t fFlags;
835 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
836 if (RT_FAILURE(rc))
837 {
838 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
839 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
840 }
841 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
842 {
843 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
844 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
845 }
846 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
847 {
848 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
849 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
850 }
851 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
852 /** @todo Check reserved bits and such stuff. PGM is better at doing
853 * that, so do it when implementing the guest virtual address
854 * TLB... */
855
856#ifdef IEM_VERIFICATION_MODE_FULL
857 /*
858 * Optimistic optimization: Use unconsumed opcode bytes from the previous
859 * instruction.
860 */
861 /** @todo optimize this differently by not using PGMPhysRead. */
862 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
863 pIemCpu->GCPhysOpcodes = GCPhys;
864 if ( offPrevOpcodes < cbOldOpcodes
865 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
866 {
867 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
868 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
869 pIemCpu->cbOpcode = cbNew;
870 return VINF_SUCCESS;
871 }
872#endif
873
874 /*
875 * Read the bytes at this address.
876 */
877 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
878 if (cbToTryRead > cbLeftOnPage)
879 cbToTryRead = cbLeftOnPage;
880 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
881 cbToTryRead = sizeof(pIemCpu->abOpcode);
882 /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
883 * doing that. */
884 if (!pIemCpu->fBypassHandlers)
885 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
886 else
887 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
888 if (rc != VINF_SUCCESS)
889 {
890 /** @todo status code handling */
891 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
892 GCPtrPC, GCPhys, cbToTryRead, rc));
893 return rc;
894 }
895 pIemCpu->cbOpcode = cbToTryRead;
896
897 return VINF_SUCCESS;
898}
899
900
901/**
902 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
903 * exception if it fails.
904 *
905 * @returns Strict VBox status code.
906 * @param pIemCpu The IEM state.
907 * @param cbMin The minimum number of additional opcode bytes needed.
908 */
909static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
910{
911 /*
912 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
913 *
914 * First translate CS:rIP to a physical address.
915 */
916 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
917 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
918 uint32_t cbToTryRead;
919 RTGCPTR GCPtrNext;
920 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
921 {
922 cbToTryRead = PAGE_SIZE;
923 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
924 if (!IEM_IS_CANONICAL(GCPtrNext))
925 return iemRaiseGeneralProtectionFault0(pIemCpu);
926 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
927 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
928 }
929 else
930 {
931 uint32_t GCPtrNext32 = pCtx->eip;
932 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
933 GCPtrNext32 += pIemCpu->cbOpcode;
934 if (GCPtrNext32 > pCtx->cs.u32Limit)
935 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
936 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
937 if (cbToTryRead < cbMin - cbLeft)
938 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
939 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
940 }
941
942 RTGCPHYS GCPhys;
943 uint64_t fFlags;
944 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
945 if (RT_FAILURE(rc))
946 {
947 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
948 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
949 }
950 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
951 {
952 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
953 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
954 }
955 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
956 {
957 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
958 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
959 }
960 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
961 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
962 /** @todo Check reserved bits and such stuff. PGM is better at doing
963 * that, so do it when implementing the guest virtual address
964 * TLB... */
965
966 /*
967 * Read the bytes at this address.
968 */
969 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
970 if (cbToTryRead > cbLeftOnPage)
971 cbToTryRead = cbLeftOnPage;
972 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
973 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
974 Assert(cbToTryRead >= cbMin - cbLeft);
975 if (!pIemCpu->fBypassHandlers)
976 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
977 else
978 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
979 if (rc != VINF_SUCCESS)
980 {
981 /** @todo status code handling */
982 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
983 return rc;
984 }
985 pIemCpu->cbOpcode += cbToTryRead;
986 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
987
988 return VINF_SUCCESS;
989}
990
991
992/**
993 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
994 *
995 * @returns Strict VBox status code.
996 * @param pIemCpu The IEM state.
997 * @param pb Where to return the opcode byte.
998 */
999DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1000{
1001 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1002 if (rcStrict == VINF_SUCCESS)
1003 {
1004 uint8_t offOpcode = pIemCpu->offOpcode;
1005 *pb = pIemCpu->abOpcode[offOpcode];
1006 pIemCpu->offOpcode = offOpcode + 1;
1007 }
1008 else
1009 *pb = 0;
1010 return rcStrict;
1011}
1012
1013
1014/**
1015 * Fetches the next opcode byte.
1016 *
1017 * @returns Strict VBox status code.
1018 * @param pIemCpu The IEM state.
1019 * @param pu8 Where to return the opcode byte.
1020 */
1021DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1022{
1023 uint8_t const offOpcode = pIemCpu->offOpcode;
1024 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1025 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1026
1027 *pu8 = pIemCpu->abOpcode[offOpcode];
1028 pIemCpu->offOpcode = offOpcode + 1;
1029 return VINF_SUCCESS;
1030}
1031
1032
1033/**
1034 * Fetches the next opcode byte, returns automatically on failure.
1035 *
1036 * @param a_pu8 Where to return the opcode byte.
1037 * @remark Implicitly references pIemCpu.
1038 */
1039#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1040 do \
1041 { \
1042 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1043 if (rcStrict2 != VINF_SUCCESS) \
1044 return rcStrict2; \
1045 } while (0)
1046
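/* Usage sketch for the macro above inside a decoder function; the opcode name
 * is made up.  On a fetch failure the macro returns the strict status code
 * straight out of the calling function. */
#if 0
FNIEMOP_DEF(iemOp_example_with_modrm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns from iemOp_example_with_modrm on failure */
    /* ... decode mod/reg/rm and dispatch on it ... */
    return VINF_SUCCESS;
}
#endif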
1047
1048/**
1049 * Fetches the next signed byte from the opcode stream.
1050 *
1051 * @returns Strict VBox status code.
1052 * @param pIemCpu The IEM state.
1053 * @param pi8 Where to return the signed byte.
1054 */
1055DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1056{
1057 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1058}
1059
1060
1061/**
1062 * Fetches the next signed byte from the opcode stream, returning automatically
1063 * on failure.
1064 *
1065 * @param pi8 Where to return the signed byte.
1066 * @remark Implicitly references pIemCpu.
1067 */
1068#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1069 do \
1070 { \
1071 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1072 if (rcStrict2 != VINF_SUCCESS) \
1073 return rcStrict2; \
1074 } while (0)
1075
1076
1077/**
1078 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1079 *
1080 * @returns Strict VBox status code.
1081 * @param pIemCpu The IEM state.
1082 * @param pu16 Where to return the opcode word.
1083 */
1084DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1085{
1086 uint8_t u8;
1087 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1088 if (rcStrict == VINF_SUCCESS)
1089 *pu16 = (int8_t)u8;
1090 return rcStrict;
1091}
1092
1093
1094/**
1095 * Fetches the next signed byte from the opcode stream, extending it to
1096 * unsigned 16-bit.
1097 *
1098 * @returns Strict VBox status code.
1099 * @param pIemCpu The IEM state.
1100 * @param pu16 Where to return the unsigned word.
1101 */
1102DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1103{
1104 uint8_t const offOpcode = pIemCpu->offOpcode;
1105 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1106 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1107
1108 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1109 pIemCpu->offOpcode = offOpcode + 1;
1110 return VINF_SUCCESS;
1111}
1112
1113
1114/**
1115 * Fetches the next signed byte from the opcode stream and sign-extends it to
1116 * a word, returning automatically on failure.
1117 *
1118 * @param pu16 Where to return the word.
1119 * @remark Implicitly references pIemCpu.
1120 */
1121#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1122 do \
1123 { \
1124 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1125 if (rcStrict2 != VINF_SUCCESS) \
1126 return rcStrict2; \
1127 } while (0)
1128
1129
1130/**
1131 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1132 *
1133 * @returns Strict VBox status code.
1134 * @param pIemCpu The IEM state.
1135 * @param pu32 Where to return the opcode dword.
1136 */
1137DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1138{
1139 uint8_t u8;
1140 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1141 if (rcStrict == VINF_SUCCESS)
1142 *pu32 = (int8_t)u8;
1143 return rcStrict;
1144}
1145
1146
1147/**
1148 * Fetches the next signed byte from the opcode stream, extending it to
1149 * unsigned 32-bit.
1150 *
1151 * @returns Strict VBox status code.
1152 * @param pIemCpu The IEM state.
1153 * @param pu32 Where to return the unsigned dword.
1154 */
1155DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1156{
1157 uint8_t const offOpcode = pIemCpu->offOpcode;
1158 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1159 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1160
1161 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1162 pIemCpu->offOpcode = offOpcode + 1;
1163 return VINF_SUCCESS;
1164}
1165
1166
1167/**
1168 * Fetches the next signed byte from the opcode stream and sign-extends it to
1169 * a double word, returning automatically on failure.
1170 *
1171 * @param pu32 Where to return the double word.
1172 * @remark Implicitly references pIemCpu.
1173 */
1174#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1175 do \
1176 { \
1177 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1178 if (rcStrict2 != VINF_SUCCESS) \
1179 return rcStrict2; \
1180 } while (0)
1181
1182
1183/**
1184 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1185 *
1186 * @returns Strict VBox status code.
1187 * @param pIemCpu The IEM state.
1188 * @param pu64 Where to return the opcode qword.
1189 */
1190DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1191{
1192 uint8_t u8;
1193 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1194 if (rcStrict == VINF_SUCCESS)
1195 *pu64 = (int8_t)u8;
1196 return rcStrict;
1197}
1198
1199
1200/**
1201 * Fetches the next signed byte from the opcode stream, extending it to
1202 * unsigned 64-bit.
1203 *
1204 * @returns Strict VBox status code.
1205 * @param pIemCpu The IEM state.
1206 * @param pu64 Where to return the unsigned qword.
1207 */
1208DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1209{
1210 uint8_t const offOpcode = pIemCpu->offOpcode;
1211 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1212 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1213
1214 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1215 pIemCpu->offOpcode = offOpcode + 1;
1216 return VINF_SUCCESS;
1217}
1218
1219
1220/**
1221 * Fetches the next signed byte from the opcode stream and sign-extends it to
1222 * a quad word, returning automatically on failure.
1223 *
1224 * @param pu64 Where to return the quad word.
1225 * @remark Implicitly references pIemCpu.
1226 */
1227#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1228 do \
1229 { \
1230 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1231 if (rcStrict2 != VINF_SUCCESS) \
1232 return rcStrict2; \
1233 } while (0)
1234
1235
1236/**
1237 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1238 *
1239 * @returns Strict VBox status code.
1240 * @param pIemCpu The IEM state.
1241 * @param pu16 Where to return the opcode word.
1242 */
1243DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1244{
1245 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1246 if (rcStrict == VINF_SUCCESS)
1247 {
1248 uint8_t offOpcode = pIemCpu->offOpcode;
1249 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1250 pIemCpu->offOpcode = offOpcode + 2;
1251 }
1252 else
1253 *pu16 = 0;
1254 return rcStrict;
1255}
1256
1257
1258/**
1259 * Fetches the next opcode word.
1260 *
1261 * @returns Strict VBox status code.
1262 * @param pIemCpu The IEM state.
1263 * @param pu16 Where to return the opcode word.
1264 */
1265DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1266{
1267 uint8_t const offOpcode = pIemCpu->offOpcode;
1268 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1269 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1270
1271 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1272 pIemCpu->offOpcode = offOpcode + 2;
1273 return VINF_SUCCESS;
1274}
1275
1276
1277/**
1278 * Fetches the next opcode word, returns automatically on failure.
1279 *
1280 * @param a_pu16 Where to return the opcode word.
1281 * @remark Implicitly references pIemCpu.
1282 */
1283#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1284 do \
1285 { \
1286 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1287 if (rcStrict2 != VINF_SUCCESS) \
1288 return rcStrict2; \
1289 } while (0)
1290
1291
1292/**
1293 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1294 *
1295 * @returns Strict VBox status code.
1296 * @param pIemCpu The IEM state.
1297 * @param pu32 Where to return the opcode double word.
1298 */
1299DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1300{
1301 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1302 if (rcStrict == VINF_SUCCESS)
1303 {
1304 uint8_t offOpcode = pIemCpu->offOpcode;
1305 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1306 pIemCpu->offOpcode = offOpcode + 2;
1307 }
1308 else
1309 *pu32 = 0;
1310 return rcStrict;
1311}
1312
1313
1314/**
1315 * Fetches the next opcode word, zero extending it to a double word.
1316 *
1317 * @returns Strict VBox status code.
1318 * @param pIemCpu The IEM state.
1319 * @param pu32 Where to return the opcode double word.
1320 */
1321DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1322{
1323 uint8_t const offOpcode = pIemCpu->offOpcode;
1324 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1325 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1326
1327 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1328 pIemCpu->offOpcode = offOpcode + 2;
1329 return VINF_SUCCESS;
1330}
1331
1332
1333/**
1334 * Fetches the next opcode word and zero extends it to a double word, returns
1335 * automatically on failure.
1336 *
1337 * @param a_pu32 Where to return the opcode double word.
1338 * @remark Implicitly references pIemCpu.
1339 */
1340#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1341 do \
1342 { \
1343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1344 if (rcStrict2 != VINF_SUCCESS) \
1345 return rcStrict2; \
1346 } while (0)
1347
1348
1349/**
1350 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1351 *
1352 * @returns Strict VBox status code.
1353 * @param pIemCpu The IEM state.
1354 * @param pu64 Where to return the opcode quad word.
1355 */
1356DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1357{
1358 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1359 if (rcStrict == VINF_SUCCESS)
1360 {
1361 uint8_t offOpcode = pIemCpu->offOpcode;
1362 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1363 pIemCpu->offOpcode = offOpcode + 2;
1364 }
1365 else
1366 *pu64 = 0;
1367 return rcStrict;
1368}
1369
1370
1371/**
1372 * Fetches the next opcode word, zero extending it to a quad word.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pIemCpu The IEM state.
1376 * @param pu64 Where to return the opcode quad word.
1377 */
1378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1379{
1380 uint8_t const offOpcode = pIemCpu->offOpcode;
1381 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1382 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1383
1384 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1385 pIemCpu->offOpcode = offOpcode + 2;
1386 return VINF_SUCCESS;
1387}
1388
1389
1390/**
1391 * Fetches the next opcode word and zero extends it to a quad word, returns
1392 * automatically on failure.
1393 *
1394 * @param a_pu64 Where to return the opcode quad word.
1395 * @remark Implicitly references pIemCpu.
1396 */
1397#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1398 do \
1399 { \
1400 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1401 if (rcStrict2 != VINF_SUCCESS) \
1402 return rcStrict2; \
1403 } while (0)
1404
1405
1406/**
1407 * Fetches the next signed word from the opcode stream.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pIemCpu The IEM state.
1411 * @param pi16 Where to return the signed word.
1412 */
1413DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1414{
1415 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1416}
1417
1418
1419/**
1420 * Fetches the next signed word from the opcode stream, returning automatically
1421 * on failure.
1422 *
1423 * @param pi16 Where to return the signed word.
1424 * @remark Implicitly references pIemCpu.
1425 */
1426#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1427 do \
1428 { \
1429 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1430 if (rcStrict2 != VINF_SUCCESS) \
1431 return rcStrict2; \
1432 } while (0)
1433
1434
1435/**
1436 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1437 *
1438 * @returns Strict VBox status code.
1439 * @param pIemCpu The IEM state.
1440 * @param pu32 Where to return the opcode dword.
1441 */
1442DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1443{
1444 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1445 if (rcStrict == VINF_SUCCESS)
1446 {
1447 uint8_t offOpcode = pIemCpu->offOpcode;
1448 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1449 pIemCpu->abOpcode[offOpcode + 1],
1450 pIemCpu->abOpcode[offOpcode + 2],
1451 pIemCpu->abOpcode[offOpcode + 3]);
1452 pIemCpu->offOpcode = offOpcode + 4;
1453 }
1454 else
1455 *pu32 = 0;
1456 return rcStrict;
1457}
1458
1459
1460/**
1461 * Fetches the next opcode dword.
1462 *
1463 * @returns Strict VBox status code.
1464 * @param pIemCpu The IEM state.
1465 * @param pu32 Where to return the opcode double word.
1466 */
1467DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1468{
1469 uint8_t const offOpcode = pIemCpu->offOpcode;
1470 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1471 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1472
1473 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1474 pIemCpu->abOpcode[offOpcode + 1],
1475 pIemCpu->abOpcode[offOpcode + 2],
1476 pIemCpu->abOpcode[offOpcode + 3]);
1477 pIemCpu->offOpcode = offOpcode + 4;
1478 return VINF_SUCCESS;
1479}
1480
1481
1482/**
1483 * Fetches the next opcode dword, returns automatically on failure.
1484 *
1485 * @param a_pu32 Where to return the opcode dword.
1486 * @remark Implicitly references pIemCpu.
1487 */
1488#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1489 do \
1490 { \
1491 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1492 if (rcStrict2 != VINF_SUCCESS) \
1493 return rcStrict2; \
1494 } while (0)
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The IEM state.
1502 * @param pu64 Where to return the opcode quad word.
1503 */
1504DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pIemCpu->offOpcode;
1510 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1511 pIemCpu->abOpcode[offOpcode + 1],
1512 pIemCpu->abOpcode[offOpcode + 2],
1513 pIemCpu->abOpcode[offOpcode + 3]);
1514 pIemCpu->offOpcode = offOpcode + 4;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521
1522/**
1523 * Fetches the next opcode dword, zero extending it to a quad word.
1524 *
1525 * @returns Strict VBox status code.
1526 * @param pIemCpu The IEM state.
1527 * @param pu64 Where to return the opcode quad word.
1528 */
1529DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1530{
1531 uint8_t const offOpcode = pIemCpu->offOpcode;
1532 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1533 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1534
1535 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1536 pIemCpu->abOpcode[offOpcode + 1],
1537 pIemCpu->abOpcode[offOpcode + 2],
1538 pIemCpu->abOpcode[offOpcode + 3]);
1539 pIemCpu->offOpcode = offOpcode + 4;
1540 return VINF_SUCCESS;
1541}
1542
1543
1544/**
1545 * Fetches the next opcode dword and zero extends it to a quad word, returns
1546 * automatically on failure.
1547 *
1548 * @param a_pu64 Where to return the opcode quad word.
1549 * @remark Implicitly references pIemCpu.
1550 */
1551#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1552 do \
1553 { \
1554 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1555 if (rcStrict2 != VINF_SUCCESS) \
1556 return rcStrict2; \
1557 } while (0)
1558
1559
1560/**
1561 * Fetches the next signed double word from the opcode stream.
1562 *
1563 * @returns Strict VBox status code.
1564 * @param pIemCpu The IEM state.
1565 * @param pi32 Where to return the signed double word.
1566 */
1567DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1568{
1569 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1570}
1571
1572/**
1573 * Fetches the next signed double word from the opcode stream, returning
1574 * automatically on failure.
1575 *
1576 * @param pi32 Where to return the signed double word.
1577 * @remark Implicitly references pIemCpu.
1578 */
1579#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1580 do \
1581 { \
1582 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1583 if (rcStrict2 != VINF_SUCCESS) \
1584 return rcStrict2; \
1585 } while (0)
1586
1587
1588/**
1589 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1590 *
1591 * @returns Strict VBox status code.
1592 * @param pIemCpu The IEM state.
1593 * @param pu64 Where to return the opcode qword.
1594 */
1595DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1596{
1597 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1598 if (rcStrict == VINF_SUCCESS)
1599 {
1600 uint8_t offOpcode = pIemCpu->offOpcode;
1601 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1602 pIemCpu->abOpcode[offOpcode + 1],
1603 pIemCpu->abOpcode[offOpcode + 2],
1604 pIemCpu->abOpcode[offOpcode + 3]);
1605 pIemCpu->offOpcode = offOpcode + 4;
1606 }
1607 else
1608 *pu64 = 0;
1609 return rcStrict;
1610}
1611
1612
1613/**
1614 * Fetches the next opcode dword, sign extending it into a quad word.
1615 *
1616 * @returns Strict VBox status code.
1617 * @param pIemCpu The IEM state.
1618 * @param pu64 Where to return the opcode quad word.
1619 */
1620DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1621{
1622 uint8_t const offOpcode = pIemCpu->offOpcode;
1623 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1624 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1625
1626 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1627 pIemCpu->abOpcode[offOpcode + 1],
1628 pIemCpu->abOpcode[offOpcode + 2],
1629 pIemCpu->abOpcode[offOpcode + 3]);
1630 *pu64 = i32;
1631 pIemCpu->offOpcode = offOpcode + 4;
1632 return VINF_SUCCESS;
1633}
1634
1635
1636/**
1637 * Fetches the next opcode double word and sign extends it to a quad word,
1638 * returns automatically on failure.
1639 *
1640 * @param a_pu64 Where to return the opcode quad word.
1641 * @remark Implicitly references pIemCpu.
1642 */
1643#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1644 do \
1645 { \
1646 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1647 if (rcStrict2 != VINF_SUCCESS) \
1648 return rcStrict2; \
1649 } while (0)
1650
1651
1652/**
1653 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1654 *
1655 * @returns Strict VBox status code.
1656 * @param pIemCpu The IEM state.
1657 * @param pu64 Where to return the opcode qword.
1658 */
1659DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1660{
1661 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1662 if (rcStrict == VINF_SUCCESS)
1663 {
1664 uint8_t offOpcode = pIemCpu->offOpcode;
1665 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1666 pIemCpu->abOpcode[offOpcode + 1],
1667 pIemCpu->abOpcode[offOpcode + 2],
1668 pIemCpu->abOpcode[offOpcode + 3],
1669 pIemCpu->abOpcode[offOpcode + 4],
1670 pIemCpu->abOpcode[offOpcode + 5],
1671 pIemCpu->abOpcode[offOpcode + 6],
1672 pIemCpu->abOpcode[offOpcode + 7]);
1673 pIemCpu->offOpcode = offOpcode + 8;
1674 }
1675 else
1676 *pu64 = 0;
1677 return rcStrict;
1678}
1679
1680
1681/**
1682 * Fetches the next opcode qword.
1683 *
1684 * @returns Strict VBox status code.
1685 * @param pIemCpu The IEM state.
1686 * @param pu64 Where to return the opcode qword.
1687 */
1688DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1689{
1690 uint8_t const offOpcode = pIemCpu->offOpcode;
1691 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1692 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1693
1694 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1695 pIemCpu->abOpcode[offOpcode + 1],
1696 pIemCpu->abOpcode[offOpcode + 2],
1697 pIemCpu->abOpcode[offOpcode + 3],
1698 pIemCpu->abOpcode[offOpcode + 4],
1699 pIemCpu->abOpcode[offOpcode + 5],
1700 pIemCpu->abOpcode[offOpcode + 6],
1701 pIemCpu->abOpcode[offOpcode + 7]);
1702 pIemCpu->offOpcode = offOpcode + 8;
1703 return VINF_SUCCESS;
1704}
1705
1706
1707/**
1708 * Fetches the next opcode quad word, returning automatically on failure.
1709 *
1710 * @param a_pu64 Where to return the opcode quad word.
1711 * @remark Implicitly references pIemCpu.
1712 */
1713#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1714 do \
1715 { \
1716 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1717 if (rcStrict2 != VINF_SUCCESS) \
1718 return rcStrict2; \
1719 } while (0)
1720
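/*
 * How the IEM_OPCODE_GET_NEXT_* macros above are intended to be used: inside
 * an opcode decoder they fetch immediates and return from the decoder on any
 * fetch failure.  A sketch with a made-up decoder name follows; it is not
 * part of the real opcode tables.
 */
#if 0 /* illustrative sketch */
FNIEMOP_DEF(iemOp_ExampleMovRaxIq)
{
    uint64_t u64Imm;
    IEM_OPCODE_GET_NEXT_U64(&u64Imm);  /* bails out of the decoder on failure */
    /* ... emit the IEM_MC_* micro-ops consuming u64Imm here ... */
    return VINF_SUCCESS;
}
#endif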
1721
1722/** @name Misc Worker Functions.
1723 * @{
1724 */
1725
1726
1727/**
1728 * Validates a new SS segment.
1729 *
1730 * @returns VBox strict status code.
1731 * @param pIemCpu The IEM per CPU instance data.
1732 * @param pCtx The CPU context.
1733 * @param NewSS The new SS selector.
1734 * @param uCpl The CPL to load the stack for.
1735 * @param pDesc Where to return the descriptor.
1736 */
1737static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1738{
1739 NOREF(pCtx);
1740
1741 /* Null selectors are not allowed (we're not called for dispatching
1742 interrupts with SS=0 in long mode). */
1743 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1744 {
1745        Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
1746 return iemRaiseGeneralProtectionFault0(pIemCpu);
1747 }
1748
1749 /*
1750 * Read the descriptor.
1751 */
1752 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
1753 if (rcStrict != VINF_SUCCESS)
1754 return rcStrict;
1755
1756 /*
1757 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1758 */
1759 if (!pDesc->Legacy.Gen.u1DescType)
1760 {
1761        Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1762 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1763 }
1764
1765 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1766 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1767 {
1768 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
1769 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1770 }
1771 /** @todo testcase: check if the TSS.ssX RPL is checked. */
1772 if ((NewSS & X86_SEL_RPL) != uCpl)
1773 {
1774 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
1775 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1776 }
1777 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1778 {
1779 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1780 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
1781 }
1782
1783 /* Is it there? */
1784 /** @todo testcase: Is this checked before the canonical / limit check below? */
1785 if (!pDesc->Legacy.Gen.u1Present)
1786 {
1787 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1788 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1789 }
1790
1791 return VINF_SUCCESS;
1792}
1793
1794
1795/**
1796 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1797 * not.
1798 *
1799 * @param a_pIemCpu The IEM per CPU data.
1800 * @param a_pCtx The CPU context.
1801 */
1802#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1803# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1804 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1805 ? (a_pCtx)->eflags.u \
1806 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1807#else
1808# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1809 ( (a_pCtx)->eflags.u )
1810#endif
1811
1812/**
1813 * Updates the EFLAGS in the correct manner wrt. PATM.
1814 *
1815 * @param a_pIemCpu The IEM per CPU data.
1816 * @param a_pCtx The CPU context.
1817 */
1818#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1819# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1820 do { \
1821 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1822 (a_pCtx)->eflags.u = (a_fEfl); \
1823 else \
1824 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1825 } while (0)
1826#else
1827# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1828 do { \
1829 (a_pCtx)->eflags.u = (a_fEfl); \
1830 } while (0)
1831#endif
1832
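/*
 * Typical pairing of the two macros above, mirroring what the real-mode
 * exception dispatcher further down does: fetch EFLAGS, clear a flag, and
 * write the result back so both the raw-mode (PATM) and normal paths stay
 * consistent.  Sketch only, not a complete function.
 */
#if 0 /* illustrative sketch */
    uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
    fEfl &= ~X86_EFL_IF;                    /* mask interrupts on entry */
    IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
#endif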
1833
1834/** @} */
1835
1836/** @name Raising Exceptions.
1837 *
1838 * @{
1839 */
1840
1841/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
1842 * @{ */
1843/** CPU exception. */
1844#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
1845/** External interrupt (from PIC, APIC, whatever). */
1846#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
1847/** Software interrupt (int, into or bound). */
1848#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
1849/** Takes an error code. */
1850#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
1851/** Takes a CR2. */
1852#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
1853/** Generated by the breakpoint instruction. */
1854#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
1855/** @} */
1856
1857/**
1858 * Loads the stack far pointer (SS:(E)SP) for the specified CPL from the TSS.
1859 *
1860 * @returns VBox strict status code.
1861 * @param pIemCpu The IEM per CPU instance data.
1862 * @param pCtx The CPU context.
1863 * @param uCpl The CPL to load the stack for.
1864 * @param pSelSS Where to return the new stack segment.
1865 * @param puEsp Where to return the new stack pointer.
1866 */
1867static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
1868 PRTSEL pSelSS, uint32_t *puEsp)
1869{
1870 VBOXSTRICTRC rcStrict;
1871 Assert(uCpl < 4);
1872 *puEsp = 0; /* make gcc happy */
1873 *pSelSS = 0; /* make gcc happy */
1874
1875 switch (pCtx->tr.Attr.n.u4Type)
1876 {
1877 /*
1878 * 16-bit TSS (X86TSS16).
1879 */
1880 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
1881 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1882 {
1883 uint32_t off = uCpl * 4 + 2;
1884 if (off + 4 > pCtx->tr.u32Limit)
1885 {
1886 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
1887 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1888 }
1889
1890 uint32_t u32Tmp = 0; /* gcc maybe... */
1891 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1892 if (rcStrict == VINF_SUCCESS)
1893 {
1894 *puEsp = RT_LOWORD(u32Tmp);
1895 *pSelSS = RT_HIWORD(u32Tmp);
1896 return VINF_SUCCESS;
1897 }
1898 break;
1899 }
1900
1901 /*
1902 * 32-bit TSS (X86TSS32).
1903 */
1904 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
1905 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1906 {
1907 uint32_t off = uCpl * 8 + 4;
1908 if (off + 7 > pCtx->tr.u32Limit)
1909 {
1910            Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
1911 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
1912 }
1913
1914 uint64_t u64Tmp;
1915 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
1916 if (rcStrict == VINF_SUCCESS)
1917 {
1918 *puEsp = u64Tmp & UINT32_MAX;
1919 *pSelSS = (RTSEL)(u64Tmp >> 32);
1920 return VINF_SUCCESS;
1921 }
1922 break;
1923 }
1924
1925 default:
1926 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1927 }
1928 return rcStrict;
1929}
1930
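/*
 * The TSS offsets used by iemRaiseLoadStackFromTss32Or16 above, spelled out:
 * a 16-bit TSS keeps sp/ss pairs of 2+2 bytes starting at offset 2, a 32-bit
 * TSS keeps esp/ss pairs of 4+4 bytes starting at offset 4.  The helper is
 * hypothetical, for illustration only.
 */
#if 0 /* illustrative sketch */
static uint32_t iemExampleTssStackSlotOffset(bool f32BitTss, uint8_t uCpl)
{
    return f32BitTss
         ? (uint32_t)uCpl * 8 + 4   /* X86TSS32: esp0 at 4, esp1 at 12, esp2 at 20 */
         : (uint32_t)uCpl * 4 + 2;  /* X86TSS16: sp0 at 2,  sp1 at 6,   sp2 at 10  */
}
#endif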
1931
1932/**
1933 * Adjusts the CPU state according to the exception being raised.
1934 *
1935 * @param pCtx The CPU context.
1936 * @param u8Vector The exception that has been raised.
1937 */
1938DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
1939{
1940 switch (u8Vector)
1941 {
1942 case X86_XCPT_DB:
1943 pCtx->dr[7] &= ~X86_DR7_GD;
1944 break;
1945 /** @todo Read the AMD and Intel exception reference... */
1946 }
1947}
1948
1949
1950/**
1951 * Implements exceptions and interrupts for real mode.
1952 *
1953 * @returns VBox strict status code.
1954 * @param pIemCpu The IEM per CPU instance data.
1955 * @param pCtx The CPU context.
1956 * @param cbInstr The number of bytes to offset rIP by in the return
1957 * address.
1958 * @param u8Vector The interrupt / exception vector number.
1959 * @param fFlags The flags.
1960 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1961 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1962 */
1963static VBOXSTRICTRC
1964iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
1965 PCPUMCTX pCtx,
1966 uint8_t cbInstr,
1967 uint8_t u8Vector,
1968 uint32_t fFlags,
1969 uint16_t uErr,
1970 uint64_t uCr2)
1971{
1972 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
1973 NOREF(uErr); NOREF(uCr2);
1974
1975 /*
1976 * Read the IDT entry.
1977 */
1978 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1979 {
1980 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
1981 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1982 }
1983 RTFAR16 Idte;
1984 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
1985 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
1986 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1987 return rcStrict;
1988
1989 /*
1990 * Push the stack frame.
1991 */
1992 uint16_t *pu16Frame;
1993 uint64_t uNewRsp;
1994 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
1995 if (rcStrict != VINF_SUCCESS)
1996 return rcStrict;
1997
1998 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
1999 pu16Frame[2] = (uint16_t)fEfl;
2000 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2001 pu16Frame[0] = pCtx->ip + cbInstr;
2002 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2003 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2004 return rcStrict;
2005
2006 /*
2007 * Load the vector address into cs:ip and make exception specific state
2008 * adjustments.
2009 */
2010 pCtx->cs.Sel = Idte.sel;
2011 pCtx->cs.ValidSel = Idte.sel;
2012 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2013 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2014 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2015 pCtx->rip = Idte.off;
2016 fEfl &= ~X86_EFL_IF;
2017 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2018
2019 /** @todo do we actually do this in real mode? */
2020 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2021 iemRaiseXcptAdjustState(pCtx, u8Vector);
2022
2023 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2024}
2025
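/*
 * The real-mode dispatch above in a nutshell: the IVT entry for vector N is
 * the 4-byte far pointer at IDTR.base + N*4 (16-bit offset, then 16-bit
 * segment), and the handler finds IP, CS and FLAGS pushed on the stack from
 * the lowest address upwards.  Self-contained sketch with made-up names:
 */
#if 0 /* illustrative sketch */
static void iemExampleBuildRealModeFrame(uint16_t *pu16Frame, uint16_t uIp, uint16_t uCs, uint16_t fFlags)
{
    pu16Frame[0] = uIp;     /* return IP  - pushed last, lowest address   */
    pu16Frame[1] = uCs;     /* return CS                                  */
    pu16Frame[2] = fFlags;  /* FLAGS      - pushed first, highest address */
}
#endif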
2026
2027/**
2028 * Implements exceptions and interrupts for protected mode.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pIemCpu The IEM per CPU instance data.
2032 * @param pCtx The CPU context.
2033 * @param cbInstr The number of bytes to offset rIP by in the return
2034 * address.
2035 * @param u8Vector The interrupt / exception vector number.
2036 * @param fFlags The flags.
2037 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2038 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2039 */
2040static VBOXSTRICTRC
2041iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2042 PCPUMCTX pCtx,
2043 uint8_t cbInstr,
2044 uint8_t u8Vector,
2045 uint32_t fFlags,
2046 uint16_t uErr,
2047 uint64_t uCr2)
2048{
2049 NOREF(cbInstr);
2050
2051 /*
2052 * Read the IDT entry.
2053 */
2054 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2055 {
2056 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2057 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2058 }
2059 X86DESC Idte;
2060 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2061 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2062 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2063 return rcStrict;
2064 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2065 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2066 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2067
2068 /*
2069 * Check the descriptor type, DPL and such.
2070 * ASSUMES this is done in the same order as described for call-gate calls.
2071 */
2072 if (Idte.Gate.u1DescType)
2073 {
2074 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2075 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2076 }
2077 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2078 switch (Idte.Gate.u4Type)
2079 {
2080 case X86_SEL_TYPE_SYS_UNDEFINED:
2081 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2082 case X86_SEL_TYPE_SYS_LDT:
2083 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2084 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2085 case X86_SEL_TYPE_SYS_UNDEFINED2:
2086 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2087 case X86_SEL_TYPE_SYS_UNDEFINED3:
2088 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2089 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2090 case X86_SEL_TYPE_SYS_UNDEFINED4:
2091 {
2092 /** @todo check what actually happens when the type is wrong...
2093 * esp. call gates. */
2094 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2095 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2096 }
2097
2098 case X86_SEL_TYPE_SYS_286_INT_GATE:
2099 case X86_SEL_TYPE_SYS_386_INT_GATE:
2100 fEflToClear |= X86_EFL_IF;
2101 break;
2102
2103 case X86_SEL_TYPE_SYS_TASK_GATE:
2104 /** @todo task gates. */
2105 AssertFailedReturn(VERR_NOT_SUPPORTED);
2106
2107 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2108 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2109 break;
2110
2111 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2112 }
2113
2114 /* Check DPL against CPL if applicable. */
2115 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2116 {
2117 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2118 {
2119 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2120 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2121 }
2122 }
2123
2124 /* Is it there? */
2125 if (!Idte.Gate.u1Present)
2126 {
2127 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2128 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2129 }
2130
2131 /* A null CS is bad. */
2132 RTSEL NewCS = Idte.Gate.u16Sel;
2133 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2134 {
2135 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2136 return iemRaiseGeneralProtectionFault0(pIemCpu);
2137 }
2138
2139 /* Fetch the descriptor for the new CS. */
2140 IEMSELDESC DescCS;
2141 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2142 if (rcStrict != VINF_SUCCESS)
2143 {
2144 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2145 return rcStrict;
2146 }
2147
2148 /* Must be a code segment. */
2149 if (!DescCS.Legacy.Gen.u1DescType)
2150 {
2151 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2152 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2153 }
2154 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2155 {
2156 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2157 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2158 }
2159
2160 /* Don't allow lowering the privilege level. */
2161 /** @todo Does the lowering of privileges apply to software interrupts
2162 * only? This has bearings on the more-privileged or
2163 * same-privilege stack behavior further down. A testcase would
2164 * be nice. */
2165 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2166 {
2167 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2168 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2169 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2170 }
2171 /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
2172
2173 /* Check the new EIP against the new CS limit. */
2174 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2175 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2176 ? Idte.Gate.u16OffsetLow
2177 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2178 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2179 if (uNewEip > cbLimitCS)
2180 {
2181        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - new EIP=%#x is out of bounds (limit=%#x) -> #GP\n",
2182             u8Vector, NewCS, uNewEip, cbLimitCS));
2183 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2184 }
2185
2186 /* Make sure the selector is present. */
2187 if (!DescCS.Legacy.Gen.u1Present)
2188 {
2189 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2190 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2191 }
2192
2193 /*
2194 * If the privilege level changes, we need to get a new stack from the TSS.
2195 * This in turn means validating the new SS and ESP...
2196 */
2197 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2198 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2199 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2200 if (uNewCpl != pIemCpu->uCpl)
2201 {
2202 RTSEL NewSS;
2203 uint32_t uNewEsp;
2204 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2205 if (rcStrict != VINF_SUCCESS)
2206 return rcStrict;
2207
2208 IEMSELDESC DescSS;
2209 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2210 if (rcStrict != VINF_SUCCESS)
2211 return rcStrict;
2212
2213 /* Check that there is sufficient space for the stack frame. */
2214 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2215 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2216 {
2217 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2218 }
2219
2220 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
2221 if ( uNewEsp - 1 > cbLimitSS
2222 || uNewEsp < cbStackFrame)
2223 {
2224 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2225 u8Vector, NewSS, uNewEsp, cbStackFrame));
2226 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2227 }
2228
2229 /*
2230 * Start making changes.
2231 */
2232
2233 /* Create the stack frame. */
2234 RTPTRUNION uStackFrame;
2235 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2236 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2237 if (rcStrict != VINF_SUCCESS)
2238 return rcStrict;
2239 void * const pvStackFrame = uStackFrame.pv;
2240
2241 if (fFlags & IEM_XCPT_FLAGS_ERR)
2242 *uStackFrame.pu32++ = uErr;
2243 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2244 ? pCtx->eip + cbInstr : pCtx->eip;
2245 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2246 uStackFrame.pu32[2] = fEfl;
2247 uStackFrame.pu32[3] = pCtx->esp;
2248 uStackFrame.pu32[4] = pCtx->ss.Sel;
2249 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2250 if (rcStrict != VINF_SUCCESS)
2251 return rcStrict;
2252
2253 /* Mark the selectors 'accessed' (hope this is the correct time). */
2254        /** @todo testcase: exactly _when_ are the accessed bits set - before or
2255 * after pushing the stack frame? (Write protect the gdt + stack to
2256 * find out.) */
2257 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2258 {
2259 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2260 if (rcStrict != VINF_SUCCESS)
2261 return rcStrict;
2262 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2263 }
2264
2265 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2266 {
2267 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2268 if (rcStrict != VINF_SUCCESS)
2269 return rcStrict;
2270 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2271 }
2272
2273 /*
2274         * Start committing the register changes (joins with the DPL=CPL branch).
2275 */
2276 pCtx->ss.Sel = NewSS;
2277 pCtx->ss.ValidSel = NewSS;
2278 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2279 pCtx->ss.u32Limit = cbLimitSS;
2280 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2281 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2282 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2283 pIemCpu->uCpl = uNewCpl;
2284 }
2285 /*
2286 * Same privilege, no stack change and smaller stack frame.
2287 */
2288 else
2289 {
2290 uint64_t uNewRsp;
2291 RTPTRUNION uStackFrame;
2292 uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2293 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 void * const pvStackFrame = uStackFrame.pv;
2297
2298 if (fFlags & IEM_XCPT_FLAGS_ERR)
2299 *uStackFrame.pu32++ = uErr;
2300 uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2301 ? pCtx->eip + cbInstr : pCtx->eip;
2302 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2303 uStackFrame.pu32[2] = fEfl;
2304 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2305 if (rcStrict != VINF_SUCCESS)
2306 return rcStrict;
2307
2308 /* Mark the CS selector as 'accessed'. */
2309 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2310 {
2311 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2312 if (rcStrict != VINF_SUCCESS)
2313 return rcStrict;
2314 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2315 }
2316
2317 /*
2318 * Start committing the register changes (joins with the other branch).
2319 */
2320 pCtx->rsp = uNewRsp;
2321 }
2322
2323 /* ... register committing continues. */
2324 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2325 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2326 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2327 pCtx->cs.u32Limit = cbLimitCS;
2328 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2329 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2330
2331 pCtx->rip = uNewEip;
2332 fEfl &= ~fEflToClear;
2333 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2334
2335 if (fFlags & IEM_XCPT_FLAGS_CR2)
2336 pCtx->cr2 = uCr2;
2337
2338 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2339 iemRaiseXcptAdjustState(pCtx, u8Vector);
2340
2341 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2342}
2343
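/*
 * Stack frame sizes used by the protected-mode dispatcher above: with a
 * privilege change the frame is EIP, CS, EFLAGS, ESP and SS (20 bytes, 24
 * with an error code); without a privilege change SS:ESP is not pushed,
 * giving 12 or 16 bytes.  Hypothetical helper, for illustration only.
 */
#if 0 /* illustrative sketch */
static uint8_t iemExampleProtModeFrameSize(bool fPrivilegeChange, bool fHasErrorCode)
{
    uint8_t cbFrame = fPrivilegeChange ? 20 : 12;   /* EIP, CS, EFLAGS (+ ESP, SS) */
    if (fHasErrorCode)
        cbFrame += 4;                               /* error code sits below EIP */
    return cbFrame;
}
#endif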
2344
2345/**
2346 * Implements exceptions and interrupts for V8086 mode.
2347 *
2348 * @returns VBox strict status code.
2349 * @param pIemCpu The IEM per CPU instance data.
2350 * @param pCtx The CPU context.
2351 * @param cbInstr The number of bytes to offset rIP by in the return
2352 * address.
2353 * @param u8Vector The interrupt / exception vector number.
2354 * @param fFlags The flags.
2355 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2356 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2357 */
2358static VBOXSTRICTRC
2359iemRaiseXcptOrIntInV8086Mode(PIEMCPU pIemCpu,
2360 PCPUMCTX pCtx,
2361 uint8_t cbInstr,
2362 uint8_t u8Vector,
2363 uint32_t fFlags,
2364 uint16_t uErr,
2365 uint64_t uCr2)
2366{
2367 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2368 /** @todo implement me. */
2369 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2370}
2371
2372
2373/**
2374 * Implements exceptions and interrupts for long mode.
2375 *
2376 * @returns VBox strict status code.
2377 * @param pIemCpu The IEM per CPU instance data.
2378 * @param pCtx The CPU context.
2379 * @param cbInstr The number of bytes to offset rIP by in the return
2380 * address.
2381 * @param u8Vector The interrupt / exception vector number.
2382 * @param fFlags The flags.
2383 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2384 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2385 */
2386static VBOXSTRICTRC
2387iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2388 PCPUMCTX pCtx,
2389 uint8_t cbInstr,
2390 uint8_t u8Vector,
2391 uint32_t fFlags,
2392 uint16_t uErr,
2393 uint64_t uCr2)
2394{
2395 NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2396 /** @todo implement me. */
2397 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
2398}
2399
2400
2401/**
2402 * Implements exceptions and interrupts.
2403 *
2404 * All exceptions and interrupts go through this function!
2405 *
2406 * @returns VBox strict status code.
2407 * @param pIemCpu The IEM per CPU instance data.
2408 * @param cbInstr The number of bytes to offset rIP by in the return
2409 * address.
2410 * @param u8Vector The interrupt / exception vector number.
2411 * @param fFlags The flags.
2412 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2413 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2414 */
2415DECL_NO_INLINE(static, VBOXSTRICTRC)
2416iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2417 uint8_t cbInstr,
2418 uint8_t u8Vector,
2419 uint32_t fFlags,
2420 uint16_t uErr,
2421 uint64_t uCr2)
2422{
2423 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2424
2425 /*
2426 * Do recursion accounting.
2427 */
2428 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2429 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2430 if (pIemCpu->cXcptRecursions == 0)
2431 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2432 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2433 else
2434 {
2435 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2436 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2437
2438        /** @todo double and triple faults. */
2439 if (pIemCpu->cXcptRecursions >= 3)
2440 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2441
2442 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2443 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2444 {
2445 ....
2446 } */
2447 }
2448 pIemCpu->cXcptRecursions++;
2449 pIemCpu->uCurXcpt = u8Vector;
2450 pIemCpu->fCurXcpt = fFlags;
2451
2452 /*
2453 * Extensive logging.
2454 */
2455#if defined(LOG_ENABLED) && defined(IN_RING3)
2456 if (LogIs3Enabled())
2457 {
2458 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2459 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2460 char szRegs[4096];
2461 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2462 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2463 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2464 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2465 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2466 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2467 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2468 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2469 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2470 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2471 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2472 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2473 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2474 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2475 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2476 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2477 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2478 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2479 " efer=%016VR{efer}\n"
2480 " pat=%016VR{pat}\n"
2481 " sf_mask=%016VR{sf_mask}\n"
2482 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2483 " lstar=%016VR{lstar}\n"
2484 " star=%016VR{star} cstar=%016VR{cstar}\n"
2485 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2486 );
2487
2488 char szInstr[256];
2489 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2490 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2491 szInstr, sizeof(szInstr), NULL);
2492 Log3(("%s%s\n", szRegs, szInstr));
2493 }
2494#endif /* LOG_ENABLED */
2495
2496 /*
2497 * Call the mode specific worker function.
2498 */
2499 VBOXSTRICTRC rcStrict;
2500 if (!(pCtx->cr0 & X86_CR0_PE))
2501 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2502 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2503 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2504 else if (!pCtx->eflags.Bits.u1VM)
2505 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2506 else
2507 rcStrict = iemRaiseXcptOrIntInV8086Mode(pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
2508
2509 /*
2510 * Unwind.
2511 */
2512 pIemCpu->cXcptRecursions--;
2513 pIemCpu->uCurXcpt = uPrevXcpt;
2514 pIemCpu->fCurXcpt = fPrevXcpt;
2515 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
2516 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
2517 return rcStrict;
2518}
2519
2520
2521/** \#DE - 00. */
2522DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
2523{
2524 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2525}
2526
2527
2528/** \#DB - 01. */
2529DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
2530{
2531 /** @todo set/clear RF. */
2532 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2533}
2534
2535
2536/** \#UD - 06. */
2537DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
2538{
2539 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2540}
2541
2542
2543/** \#NM - 07. */
2544DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
2545{
2546 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2547}
2548
2549
2550#ifdef SOME_UNUSED_FUNCTION
2551/** \#TS(err) - 0a. */
2552DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2553{
2554 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2555}
2556#endif
2557
2558
2559/** \#TS(tr) - 0a. */
2560DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
2561{
2562 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2563 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
2564}
2565
2566
2567/** \#NP(err) - 0b. */
2568DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
2569{
2570 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2571}
2572
2573
2574/** \#NP(seg) - 0b. */
2575DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
2576{
2577 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2578 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
2579}
2580
2581
2582/** \#NP(sel) - 0b. */
2583DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2584{
2585 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2586 uSel & ~X86_SEL_RPL, 0);
2587}
2588
2589
2590/** \#SS(seg) - 0c. */
2591DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
2592{
2593 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2594 uSel & ~X86_SEL_RPL, 0);
2595}
2596
2597
2598/** \#GP(n) - 0d. */
2599DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
2600{
2601 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2602}
2603
2604
2605/** \#GP(0) - 0d. */
2606DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
2607{
2608 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2609}
2610
2611
2612/** \#GP(sel) - 0d. */
2613DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2614{
2615 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2616 Sel & ~X86_SEL_RPL, 0);
2617}
2618
2619
2620/** \#GP(0) - 0d. */
2621DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
2622{
2623 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2624}
2625
2626
2627/** \#GP(sel) - 0d. */
2628DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2629{
2630 NOREF(iSegReg); NOREF(fAccess);
2631 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2632 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2633}
2634
2635
2636/** \#GP(sel) - 0d. */
2637DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
2638{
2639 NOREF(Sel);
2640 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2641}
2642
2643
2644/** \#GP(sel) - 0d. */
2645DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
2646{
2647 NOREF(iSegReg); NOREF(fAccess);
2648 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2649}
2650
2651
2652/** \#PF(n) - 0e. */
2653DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
2654{
2655 uint16_t uErr;
2656 switch (rc)
2657 {
2658 case VERR_PAGE_NOT_PRESENT:
2659 case VERR_PAGE_TABLE_NOT_PRESENT:
2660 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2661 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2662 uErr = 0;
2663 break;
2664
2665 default:
2666 AssertMsgFailed(("%Rrc\n", rc));
2667 case VERR_ACCESS_DENIED:
2668 uErr = X86_TRAP_PF_P;
2669 break;
2670
2671 /** @todo reserved */
2672 }
2673
2674 if (pIemCpu->uCpl == 3)
2675 uErr |= X86_TRAP_PF_US;
2676
2677 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2678 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
2679 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
2680 uErr |= X86_TRAP_PF_ID;
2681
2682    /* Note! RW access callers reporting a WRITE protection fault will clear
2683 the READ flag before calling. So, read-modify-write accesses (RW)
2684 can safely be reported as READ faults. */
2685 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2686 uErr |= X86_TRAP_PF_RW;
2687
2688 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2689 uErr, GCPtrWhere);
2690}
2691
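/*
 * The #PF error code assembled by iemRaisePageFault above, bit by bit:
 * P (bit 0) = protection violation rather than a not-present page, W/R
 * (bit 1) = write access, U/S (bit 2) = CPL 3 access, I/D (bit 4) =
 * instruction fetch when NX (with PAE) is enabled.  Sketch using the same
 * X86_TRAP_PF_* constants:
 */
#if 0 /* illustrative sketch */
static uint16_t iemExamplePageFaultErrCode(bool fPresent, bool fWrite, bool fUser, bool fInstrFetch)
{
    uint16_t uErr = 0;
    if (fPresent)       uErr |= X86_TRAP_PF_P;
    if (fWrite)         uErr |= X86_TRAP_PF_RW;
    if (fUser)          uErr |= X86_TRAP_PF_US;
    if (fInstrFetch)    uErr |= X86_TRAP_PF_ID;
    return uErr;
}
#endif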
2692
2693/** \#MF(0) - 10. */
2694DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
2695{
2696 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2697}
2698
2699
2700/** \#AC(0) - 11. */
2701DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
2702{
2703 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2704}
2705
2706
2707/**
2708 * Macro for calling iemCImplRaiseDivideError().
2709 *
2710 * This enables us to add/remove arguments and force different levels of
2711 * inlining as we wish.
2712 *
2713 * @return Strict VBox status code.
2714 */
2715#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
2716IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2717{
2718 NOREF(cbInstr);
2719 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2720}
2721
2722
2723/**
2724 * Macro for calling iemCImplRaiseInvalidLockPrefix().
2725 *
2726 * This enables us to add/remove arguments and force different levels of
2727 * inlining as we wish.
2728 *
2729 * @return Strict VBox status code.
2730 */
2731#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
2732IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2733{
2734 NOREF(cbInstr);
2735 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2736}
2737
2738
2739/**
2740 * Macro for calling iemCImplRaiseInvalidOpcode().
2741 *
2742 * This enables us to add/remove arguments and force different levels of
2743 * inlining as we wish.
2744 *
2745 * @return Strict VBox status code.
2746 */
2747#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
2748IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2749{
2750 NOREF(cbInstr);
2751 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2752}
2753
2754
2755/** @} */
2756
2757
2758/*
2759 *
2760 * Helper routines.
2761 * Helper routines.
2762 * Helper routines.
2763 *
2764 */
2765
2766/**
2767 * Recalculates the effective operand size.
2768 *
2769 * @param pIemCpu The IEM state.
2770 */
2771static void iemRecalEffOpSize(PIEMCPU pIemCpu)
2772{
2773 switch (pIemCpu->enmCpuMode)
2774 {
2775 case IEMMODE_16BIT:
2776 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
2777 break;
2778 case IEMMODE_32BIT:
2779 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
2780 break;
2781 case IEMMODE_64BIT:
2782 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
2783 {
2784 case 0:
2785 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
2786 break;
2787 case IEM_OP_PRF_SIZE_OP:
2788 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2789 break;
2790 case IEM_OP_PRF_SIZE_REX_W:
2791 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
2792 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2793 break;
2794 }
2795 break;
2796 default:
2797 AssertFailed();
2798 }
2799}
2800
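/*
 * The 64-bit mode rules encoded in iemRecalEffOpSize above: REX.W forces a
 * 64-bit operand size and overrides the 0x66 prefix, 0x66 alone selects
 * 16-bit, and with neither prefix the instruction default applies (32-bit
 * for most instructions).  Sketch with plain bit counts instead of IEMMODE:
 */
#if 0 /* illustrative sketch */
static unsigned iemExampleEffOpSizeBits64(bool fRexW, bool fOpSizePrefix, unsigned cBitsDefault)
{
    if (fRexW)
        return 64;              /* REX.W wins; 0x66 is ignored */
    if (fOpSizePrefix)
        return 16;              /* 0x66 without REX.W */
    return cBitsDefault;        /* usually 32; 64 where iemRecalEffOpSize64Default applies */
}
#endif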
2801
2802/**
2803 * Sets the default operand size to 64-bit and recalculates the effective
2804 * operand size.
2805 *
2806 * @param pIemCpu The IEM state.
2807 */
2808static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
2809{
2810 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
2811 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
2812 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
2813 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
2814 else
2815 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
2816}
2817
2818
2819/*
2820 *
2821 * Common opcode decoders.
2822 * Common opcode decoders.
2823 * Common opcode decoders.
2824 *
2825 */
2826//#include <iprt/mem.h>
2827
2828/**
2829 * Used to add extra details about a stub case.
2830 * @param pIemCpu The IEM per CPU state.
2831 */
2832static void iemOpStubMsg2(PIEMCPU pIemCpu)
2833{
2834#if defined(LOG_ENABLED) && defined(IN_RING3)
2835 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2836 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2837 char szRegs[4096];
2838 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2839 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2840 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2841 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2842 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2843 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2844 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2845 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2846 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2847 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2848 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2849 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2850 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2851 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2852 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2853 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2854 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2855 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2856 " efer=%016VR{efer}\n"
2857 " pat=%016VR{pat}\n"
2858 " sf_mask=%016VR{sf_mask}\n"
2859 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2860 " lstar=%016VR{lstar}\n"
2861 " star=%016VR{star} cstar=%016VR{cstar}\n"
2862 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2863 );
2864
2865 char szInstr[256];
2866 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
2867 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2868 szInstr, sizeof(szInstr), NULL);
2869
2870 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
2871#else
2872 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
2873#endif
2874}
2875
2876/**
2877 * Complains about a stub.
2878 *
2879 * Two versions of this macro are provided: one for daily use and one for
2880 * use while working on IEM.
2881 */
2882#if 0
2883# define IEMOP_BITCH_ABOUT_STUB() \
2884 do { \
2885 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
2886 iemOpStubMsg2(pIemCpu); \
2887 RTAssertPanic(); \
2888 } while (0)
2889#else
2890# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
2891#endif
2892
2893/** Stubs an opcode. */
2894#define FNIEMOP_STUB(a_Name) \
2895 FNIEMOP_DEF(a_Name) \
2896 { \
2897 IEMOP_BITCH_ABOUT_STUB(); \
2898 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2899 } \
2900 typedef int ignore_semicolon
2901
2902/** Stubs an opcode. */
2903#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
2904 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2905 { \
2906 IEMOP_BITCH_ABOUT_STUB(); \
2907 NOREF(a_Name0); \
2908 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
2909 } \
2910 typedef int ignore_semicolon
2911
2912/** Stubs an opcode which currently should raise \#UD. */
2913#define FNIEMOP_UD_STUB(a_Name) \
2914 FNIEMOP_DEF(a_Name) \
2915 { \
2916 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2917 return IEMOP_RAISE_INVALID_OPCODE(); \
2918 } \
2919 typedef int ignore_semicolon
2920
2921/** Stubs an opcode which currently should raise \#UD. */
2922#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
2923 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
2924 { \
2925 NOREF(a_Name0); \
2926 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
2927 return IEMOP_RAISE_INVALID_OPCODE(); \
2928 } \
2929 typedef int ignore_semicolon
2930
2931
2932
2933/** @name Register Access.
2934 * @{
2935 */
2936
2937/**
2938 * Gets a reference (pointer) to the specified hidden segment register.
2939 *
2940 * @returns Hidden register reference.
2941 * @param pIemCpu The per CPU data.
2942 * @param iSegReg The segment register.
2943 */
2944static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
2945{
2946 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2947 PCPUMSELREG pSReg;
2948 switch (iSegReg)
2949 {
2950 case X86_SREG_ES: pSReg = &pCtx->es; break;
2951 case X86_SREG_CS: pSReg = &pCtx->cs; break;
2952 case X86_SREG_SS: pSReg = &pCtx->ss; break;
2953 case X86_SREG_DS: pSReg = &pCtx->ds; break;
2954 case X86_SREG_FS: pSReg = &pCtx->fs; break;
2955 case X86_SREG_GS: pSReg = &pCtx->gs; break;
2956 default:
2957 AssertFailedReturn(NULL);
2958 }
2959#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2960 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
2961 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
2962#else
2963 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2964#endif
2965 return pSReg;
2966}
2967
2968
2969/**
2970 * Gets a reference (pointer) to the specified segment register (the selector
2971 * value).
2972 *
2973 * @returns Pointer to the selector variable.
2974 * @param pIemCpu The per CPU data.
2975 * @param iSegReg The segment register.
2976 */
2977static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
2978{
2979 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2980 switch (iSegReg)
2981 {
2982 case X86_SREG_ES: return &pCtx->es.Sel;
2983 case X86_SREG_CS: return &pCtx->cs.Sel;
2984 case X86_SREG_SS: return &pCtx->ss.Sel;
2985 case X86_SREG_DS: return &pCtx->ds.Sel;
2986 case X86_SREG_FS: return &pCtx->fs.Sel;
2987 case X86_SREG_GS: return &pCtx->gs.Sel;
2988 }
2989 AssertFailedReturn(NULL);
2990}
2991
2992
2993/**
2994 * Fetches the selector value of a segment register.
2995 *
2996 * @returns The selector value.
2997 * @param pIemCpu The per CPU data.
2998 * @param iSegReg The segment register.
2999 */
3000static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3001{
3002 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3003 switch (iSegReg)
3004 {
3005 case X86_SREG_ES: return pCtx->es.Sel;
3006 case X86_SREG_CS: return pCtx->cs.Sel;
3007 case X86_SREG_SS: return pCtx->ss.Sel;
3008 case X86_SREG_DS: return pCtx->ds.Sel;
3009 case X86_SREG_FS: return pCtx->fs.Sel;
3010 case X86_SREG_GS: return pCtx->gs.Sel;
3011 }
3012 AssertFailedReturn(0xffff);
3013}
3014
3015
3016/**
3017 * Gets a reference (pointer) to the specified general register.
3018 *
3019 * @returns Register reference.
3020 * @param pIemCpu The per CPU data.
3021 * @param iReg The general register.
3022 */
3023static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3024{
3025 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3026 switch (iReg)
3027 {
3028 case X86_GREG_xAX: return &pCtx->rax;
3029 case X86_GREG_xCX: return &pCtx->rcx;
3030 case X86_GREG_xDX: return &pCtx->rdx;
3031 case X86_GREG_xBX: return &pCtx->rbx;
3032 case X86_GREG_xSP: return &pCtx->rsp;
3033 case X86_GREG_xBP: return &pCtx->rbp;
3034 case X86_GREG_xSI: return &pCtx->rsi;
3035 case X86_GREG_xDI: return &pCtx->rdi;
3036 case X86_GREG_x8: return &pCtx->r8;
3037 case X86_GREG_x9: return &pCtx->r9;
3038 case X86_GREG_x10: return &pCtx->r10;
3039 case X86_GREG_x11: return &pCtx->r11;
3040 case X86_GREG_x12: return &pCtx->r12;
3041 case X86_GREG_x13: return &pCtx->r13;
3042 case X86_GREG_x14: return &pCtx->r14;
3043 case X86_GREG_x15: return &pCtx->r15;
3044 }
3045 AssertFailedReturn(NULL);
3046}
3047
3048
3049/**
3050 * Gets a reference (pointer) to the specified 8-bit general register.
3051 *
3052 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3053 *
3054 * @returns Register reference.
3055 * @param pIemCpu The per CPU data.
3056 * @param iReg The register.
3057 */
3058static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3059{
3060 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3061 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3062
3063 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3064 if (iReg >= 4)
3065 pu8Reg++;
3066 return pu8Reg;
3067}
3068
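/*
 * The legacy 8-bit register quirk handled by iemGRegRefU8 above: without a
 * REX prefix, register numbers 4-7 do not address SPL/BPL/SIL/DIL but the
 * high bytes AH/CH/DH/BH, i.e. byte 1 of registers 0-3.  Self-contained
 * sketch working on a plain array of 64-bit register values:
 */
#if 0 /* illustrative sketch */
static uint8_t iemExampleFetchLegacyGReg8(uint64_t const *pauGRegs, uint8_t iReg)
{
    if (iReg < 4)
        return (uint8_t)pauGRegs[iReg];             /* AL, CL, DL, BL */
    return (uint8_t)(pauGRegs[iReg & 3] >> 8);      /* AH, CH, DH, BH */
}
#endif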
3069
3070/**
3071 * Fetches the value of an 8-bit general register.
3072 *
3073 * @returns The register value.
3074 * @param pIemCpu The per CPU data.
3075 * @param iReg The register.
3076 */
3077static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3078{
3079 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3080 return *pbSrc;
3081}
3082
3083
3084/**
3085 * Fetches the value of a 16-bit general register.
3086 *
3087 * @returns The register value.
3088 * @param pIemCpu The per CPU data.
3089 * @param iReg The register.
3090 */
3091static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3092{
3093 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3094}
3095
3096
3097/**
3098 * Fetches the value of a 32-bit general register.
3099 *
3100 * @returns The register value.
3101 * @param pIemCpu The per CPU data.
3102 * @param iReg The register.
3103 */
3104static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3105{
3106 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3107}
3108
3109
3110/**
3111 * Fetches the value of a 64-bit general register.
3112 *
3113 * @returns The register value.
3114 * @param pIemCpu The per CPU data.
3115 * @param iReg The register.
3116 */
3117static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3118{
3119 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3120}
3121
3122
3123/**
3124 * Checks whether the FPU state is in FXSAVE format or not.
3125 *
3126 * @returns true if it is, false if it's in FNSAVE.
3127 * @param pIemCpu The IEM per CPU data.
3128 */
3129DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3130{
3131#ifdef RT_ARCH_AMD64
3132 NOREF(pIemCpu);
3133 return true;
3134#else
3135 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3136 return true;
3137#endif
3138}
3139
3140
3141/**
3142 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3143 *
3144 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3145 * segment limit.
3146 *
3147 * @param pIemCpu The per CPU data.
3148 * @param offNextInstr The offset of the next instruction.
3149 */
3150static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3151{
3152 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3153 switch (pIemCpu->enmEffOpSize)
3154 {
3155 case IEMMODE_16BIT:
3156 {
3157 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3158 if ( uNewIp > pCtx->cs.u32Limit
3159 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3160 return iemRaiseGeneralProtectionFault0(pIemCpu);
3161 pCtx->rip = uNewIp;
3162 break;
3163 }
3164
3165 case IEMMODE_32BIT:
3166 {
3167 Assert(pCtx->rip <= UINT32_MAX);
3168 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3169
3170 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3171 if (uNewEip > pCtx->cs.u32Limit)
3172 return iemRaiseGeneralProtectionFault0(pIemCpu);
3173 pCtx->rip = uNewEip;
3174 break;
3175 }
3176
3177 case IEMMODE_64BIT:
3178 {
3179 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3180
3181 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3182 if (!IEM_IS_CANONICAL(uNewRip))
3183 return iemRaiseGeneralProtectionFault0(pIemCpu);
3184 pCtx->rip = uNewRip;
3185 break;
3186 }
3187
3188 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3189 }
3190
3191 return VINF_SUCCESS;
3192}
3193
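/*
 * The relative jump arithmetic used above and in the S16/S32 variants below:
 * the displacement is relative to the end of the instruction, so the target
 * is current IP + instruction length + displacement, truncated and limit
 * checked according to the effective operand size.  16-bit sketch:
 */
#if 0 /* illustrative sketch */
static uint16_t iemExampleJumpTarget16(uint16_t uIp, uint8_t cbInstr, int8_t offDisp)
{
    return (uint16_t)(uIp + cbInstr + offDisp);     /* wraps within the 64K segment */
}
#endif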
3194
3195/**
3196 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3197 *
3198 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3199 * segment limit.
3200 *
3201 * @returns Strict VBox status code.
3202 * @param pIemCpu The per CPU data.
3203 * @param offNextInstr The offset of the next instruction.
3204 */
3205static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3206{
3207 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3208 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3209
3210 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3211 if ( uNewIp > pCtx->cs.u32Limit
3212 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3213 return iemRaiseGeneralProtectionFault0(pIemCpu);
3214 /** @todo Test 16-bit jump in 64-bit mode. */
3215 pCtx->rip = uNewIp;
3216
3217 return VINF_SUCCESS;
3218}
3219
3220
3221/**
3222 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3223 *
3224 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3225 * segment limit.
3226 *
3227 * @returns Strict VBox status code.
3228 * @param pIemCpu The per CPU data.
3229 * @param offNextInstr The offset of the next instruction.
3230 */
3231static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3232{
3233 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3234 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3235
3236 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3237 {
3238 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3239
3240 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3241 if (uNewEip > pCtx->cs.u32Limit)
3242 return iemRaiseGeneralProtectionFault0(pIemCpu);
3243 pCtx->rip = uNewEip;
3244 }
3245 else
3246 {
3247 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3248
3249 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3250 if (!IEM_IS_CANONICAL(uNewRip))
3251 return iemRaiseGeneralProtectionFault0(pIemCpu);
3252 pCtx->rip = uNewRip;
3253 }
3254 return VINF_SUCCESS;
3255}
3256
3257
3258/**
3259 * Performs a near jump to the specified address.
3260 *
3261 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3262 * segment limit.
3263 *
3264 * @param pIemCpu The per CPU data.
3265 * @param uNewRip The new RIP value.
3266 */
3267static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3268{
3269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3270 switch (pIemCpu->enmEffOpSize)
3271 {
3272 case IEMMODE_16BIT:
3273 {
3274 Assert(uNewRip <= UINT16_MAX);
3275 if ( uNewRip > pCtx->cs.u32Limit
3276 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3277 return iemRaiseGeneralProtectionFault0(pIemCpu);
3278 /** @todo Test 16-bit jump in 64-bit mode. */
3279 pCtx->rip = uNewRip;
3280 break;
3281 }
3282
3283 case IEMMODE_32BIT:
3284 {
3285 Assert(uNewRip <= UINT32_MAX);
3286 Assert(pCtx->rip <= UINT32_MAX);
3287 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3288
3289 if (uNewRip > pCtx->cs.u32Limit)
3290 return iemRaiseGeneralProtectionFault0(pIemCpu);
3291 pCtx->rip = uNewRip;
3292 break;
3293 }
3294
3295 case IEMMODE_64BIT:
3296 {
3297 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3298
3299 if (!IEM_IS_CANONICAL(uNewRip))
3300 return iemRaiseGeneralProtectionFault0(pIemCpu);
3301 pCtx->rip = uNewRip;
3302 break;
3303 }
3304
3305 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3306 }
3307
3308 return VINF_SUCCESS;
3309}
3310
3311
3312/**
3313 * Gets the address of the top of the stack.
3314 *
3315 * @param pCtx The CPU context from which SP/ESP/RSP should be
3316 * read.
3317 */
3318DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)
3319{
3320 if (pCtx->ss.Attr.n.u1Long)
3321 return pCtx->rsp;
3322 if (pCtx->ss.Attr.n.u1DefBig)
3323 return pCtx->esp;
3324 return pCtx->sp;
3325}
3326
3327
3328/**
3329 * Updates the RIP/EIP/IP to point to the next instruction.
3330 *
3331 * @param pIemCpu The per CPU data.
3332 * @param cbInstr The number of bytes to add.
3333 */
3334static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)
3335{
3336 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3337 switch (pIemCpu->enmCpuMode)
3338 {
3339 case IEMMODE_16BIT:
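            /* IP wraps at 64K: add to EIP and mask to 16 bits so the upper
               half of EIP stays clear. */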
3340 Assert(pCtx->rip <= UINT16_MAX);
3341 pCtx->eip += cbInstr;
3342 pCtx->eip &= UINT32_C(0xffff);
3343 break;
3344
3345 case IEMMODE_32BIT:
3346 pCtx->eip += cbInstr;
3347 Assert(pCtx->rip <= UINT32_MAX);
3348 break;
3349
3350 case IEMMODE_64BIT:
3351 pCtx->rip += cbInstr;
3352 break;
3353 default: AssertFailed();
3354 }
3355}
3356
3357
3358/**
3359 * Updates the RIP/EIP/IP to point to the next instruction.
3360 *
3361 * @param pIemCpu The per CPU data.
3362 */
3363static void iemRegUpdateRip(PIEMCPU pIemCpu)
3364{
3365 return iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);
3366}
3367
3368
3369/**
3370 * Adds to the stack pointer.
3371 *
3372 * @param pCtx The CPU context whose SP/ESP/RSP should be
3373 * updated.
3374 * @param cbToAdd The number of bytes to add.
3375 */
3376DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)
3377{
3378 if (pCtx->ss.Attr.n.u1Long)
3379 pCtx->rsp += cbToAdd;
3380 else if (pCtx->ss.Attr.n.u1DefBig)
3381 pCtx->esp += cbToAdd;
3382 else
3383 pCtx->sp += cbToAdd;
3384}
3385
3386
3387/**
3388 * Subtracts from the stack pointer.
3389 *
3390 * @param pCtx The CPU context whose SP/ESP/RSP should be
3391 * updated.
3392 * @param cbToSub The number of bytes to subtract.
3393 */
3394DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)
3395{
3396 if (pCtx->ss.Attr.n.u1Long)
3397 pCtx->rsp -= cbToSub;
3398 else if (pCtx->ss.Attr.n.u1DefBig)
3399 pCtx->esp -= cbToSub;
3400 else
3401 pCtx->sp -= cbToSub;
3402}
3403
3404
3405/**
3406 * Adds to the temporary stack pointer.
3407 *
3408 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3409 * @param cbToAdd The number of bytes to add.
3410 * @param pCtx Where to get the current stack mode.
3411 */
3412DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint16_t cbToAdd, PCCPUMCTX pCtx)
3413{
3414 if (pCtx->ss.Attr.n.u1Long)
3415 pTmpRsp->u += cbToAdd;
3416 else if (pCtx->ss.Attr.n.u1DefBig)
3417 pTmpRsp->DWords.dw0 += cbToAdd;
3418 else
3419 pTmpRsp->Words.w0 += cbToAdd;
3420}
3421
3422
3423/**
3424 * Subtracts from the temporary stack pointer.
3425 *
3426 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3427 * @param cbToSub The number of bytes to subtract.
3428 * @param pCtx Where to get the current stack mode.
3429 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3430 * expecting that.
3431 */
3432DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint16_t cbToSub, PCCPUMCTX pCtx)
3433{
3434 if (pCtx->ss.Attr.n.u1Long)
3435 pTmpRsp->u -= cbToSub;
3436 else if (pCtx->ss.Attr.n.u1DefBig)
3437 pTmpRsp->DWords.dw0 -= cbToSub;
3438 else
3439 pTmpRsp->Words.w0 -= cbToSub;
3440}
3441
3442
3443/**
3444 * Calculates the effective stack address for a push of the specified size as
3445 * well as the new RSP value (upper bits may be masked).
3446 *
3447 * @returns Effective stack address for the push.
3448 * @param pCtx Where to get the current stack mode.
3449 * @param cbItem The size of the stack item to push.
3450 * @param puNewRsp Where to return the new RSP value.
3451 */
3452DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3453{
3454 RTUINT64U uTmpRsp;
3455 RTGCPTR GCPtrTop;
3456 uTmpRsp.u = pCtx->rsp;
3457
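    /* The stack grows down: the decremented pointer is both the new RSP value
       and the effective address of the push. The SS attributes pick the width:
       64-bit in long mode, 32-bit when the default-big bit is set, else 16-bit;
       only that part of the register wraps. */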
3458 if (pCtx->ss.Attr.n.u1Long)
3459 GCPtrTop = uTmpRsp.u -= cbItem;
3460 else if (pCtx->ss.Attr.n.u1DefBig)
3461 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3462 else
3463 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3464 *puNewRsp = uTmpRsp.u;
3465 return GCPtrTop;
3466}
3467
3468
3469/**
3470 * Gets the current stack pointer and calculates the value after a pop of the
3471 * specified size.
3472 *
3473 * @returns Current stack pointer.
3474 * @param pCtx Where to get the current stack mode.
3475 * @param cbItem The size of the stack item to pop.
3476 * @param puNewRsp Where to return the new RSP value.
3477 */
3478DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
3479{
3480 RTUINT64U uTmpRsp;
3481 RTGCPTR GCPtrTop;
3482 uTmpRsp.u = pCtx->rsp;
3483
3484 if (pCtx->ss.Attr.n.u1Long)
3485 {
3486 GCPtrTop = uTmpRsp.u;
3487 uTmpRsp.u += cbItem;
3488 }
3489 else if (pCtx->ss.Attr.n.u1DefBig)
3490 {
3491 GCPtrTop = uTmpRsp.DWords.dw0;
3492 uTmpRsp.DWords.dw0 += cbItem;
3493 }
3494 else
3495 {
3496 GCPtrTop = uTmpRsp.Words.w0;
3497 uTmpRsp.Words.w0 += cbItem;
3498 }
3499 *puNewRsp = uTmpRsp.u;
3500 return GCPtrTop;
3501}
3502
3503
3504/**
3505 * Calculates the effective stack address for a push of the specified size as
3506 * well as the new temporary RSP value (upper bits may be masked).
3507 *
3508 * @returns Effective stack address for the push.
3509 * @param pTmpRsp The temporary stack pointer. This is updated.
3510 * @param cbItem The size of the stack item to push.
3511 * @param pCtx Where to get the current stack mode.
3512 */
3513DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3514{
3515 RTGCPTR GCPtrTop;
3516
3517 if (pCtx->ss.Attr.n.u1Long)
3518 GCPtrTop = pTmpRsp->u -= cbItem;
3519 else if (pCtx->ss.Attr.n.u1DefBig)
3520 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3521 else
3522 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3523 return GCPtrTop;
3524}
3525
3526
3527/**
3528 * Gets the effective stack address for a pop of the specified size and
3529 * calculates and updates the temporary RSP.
3530 *
3531 * @returns Current stack pointer.
3532 * @param pTmpRsp The temporary stack pointer. This is updated.
3533 * @param pCtx Where to get the current stack mode.
3534 * @param cbItem The size of the stack item to pop.
3535 */
3536DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)
3537{
3538 RTGCPTR GCPtrTop;
3539 if (pCtx->ss.Attr.n.u1Long)
3540 {
3541 GCPtrTop = pTmpRsp->u;
3542 pTmpRsp->u += cbItem;
3543 }
3544 else if (pCtx->ss.Attr.n.u1DefBig)
3545 {
3546 GCPtrTop = pTmpRsp->DWords.dw0;
3547 pTmpRsp->DWords.dw0 += cbItem;
3548 }
3549 else
3550 {
3551 GCPtrTop = pTmpRsp->Words.w0;
3552 pTmpRsp->Words.w0 += cbItem;
3553 }
3554 return GCPtrTop;
3555}
3556
3557
3558/**
3559 * Checks if an Intel CPUID feature bit is set.
3560 *
3561 * @returns true / false.
3562 *
3563 * @param pIemCpu The IEM per CPU data.
3564 * @param fEdx The EDX bit to test, or 0 if ECX.
3565 * @param fEcx The ECX bit to test, or 0 if EDX.
3566 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
3567 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
3568 */
3569static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3570{
3571 uint32_t uEax, uEbx, uEcx, uEdx;
3572 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
3573 return (fEcx && (uEcx & fEcx))
3574 || (fEdx && (uEdx & fEdx));
3575}
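/* Illustrative call only (a sketch; X86_CPUID_FEATURE_EDX_FXSR is assumed to be
   the usual VBox CPUID feature-bit constant, and callers normally go through the
   IEM_IS_INTEL_CPUID_FEATURE_PRESENT_* macros mentioned in the remarks above):
       if (iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_FXSR, 0))
           ...
 */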
3576
3577
3578/**
3579 * Checks if an AMD CPUID feature bit is set.
3580 *
3581 * @returns true / false.
3582 *
3583 * @param pIemCpu The IEM per CPU data.
3584 * @param fEdx The EDX bit to test, or 0 if ECX.
3585 * @param fEcx The ECX bit to test, or 0 if EDX.
3586 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
3587 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
3588 */
3589static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
3590{
3591 uint32_t uEax, uEbx, uEcx, uEdx;
3592 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
3593 return (fEcx && (uEcx & fEcx))
3594 || (fEdx && (uEdx & fEdx));
3595}
3596
3597/** @} */
3598
3599
3600/** @name FPU access and helpers.
3601 *
3602 * @{
3603 */
3604
3605
3606/**
3607 * Hook for preparing to use the host FPU.
3608 *
3609 * This is necessary in ring-0 and raw-mode context.
3610 *
3611 * @param pIemCpu The IEM per CPU data.
3612 */
3613DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
3614{
3615#ifdef IN_RING3
3616 NOREF(pIemCpu);
3617#else
3618/** @todo RZ: FIXME */
3619//# error "Implement me"
3620#endif
3621}
3622
3623
3624/**
3625 * Stores a QNaN value into a FPU register.
3626 *
3627 * @param pReg Pointer to the register.
3628 */
3629DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
3630{
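    /* Builds the x87 "real indefinite" QNaN: sign=1, exponent=0x7fff (all ones),
       mantissa=0xC000000000000000 (integer bit plus the top fraction bit). */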
3631 pReg->au32[0] = UINT32_C(0x00000000);
3632 pReg->au32[1] = UINT32_C(0xc0000000);
3633 pReg->au16[4] = UINT16_C(0xffff);
3634}
3635
3636
3637/**
3638 * Updates the FOP, FPU.CS and FPUIP registers.
3639 *
3640 * @param pIemCpu The IEM per CPU data.
3641 * @param pCtx The CPU context.
3642 */
3643DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
3644{
3645 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
3646 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
3647 /** @todo FPU.CS and FPUIP need to be kept separately. */
3648 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3649 {
3650 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
3651 * happens in real mode here based on the fnsave and fnstenv images. */
3652 pCtx->fpu.CS = 0;
3653 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
3654 }
3655 else
3656 {
3657 pCtx->fpu.CS = pCtx->cs.Sel;
3658 pCtx->fpu.FPUIP = pCtx->rip;
3659 }
3660}
3661
3662
3663/**
3664 * Updates the FPU.DS and FPUDP registers.
3665 *
3666 * @param pIemCpu The IEM per CPU data.
3667 * @param pCtx The CPU context.
3668 * @param iEffSeg The effective segment register.
3669 * @param GCPtrEff The effective address relative to @a iEffSeg.
3670 */
3671DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3672{
3673 RTSEL sel;
3674 switch (iEffSeg)
3675 {
3676 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
3677 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
3678 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
3679 case X86_SREG_ES: sel = pCtx->es.Sel; break;
3680 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
3681 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
3682 default:
3683 AssertMsgFailed(("%d\n", iEffSeg));
3684 sel = pCtx->ds.Sel;
3685 }
3686 /** @todo FPU.DS and FPUDP need to be kept separately. */
3687 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3688 {
3689 pCtx->fpu.DS = 0;
3690 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
3691 }
3692 else
3693 {
3694 pCtx->fpu.DS = sel;
3695 pCtx->fpu.FPUDP = GCPtrEff;
3696 }
3697}
3698
3699
3700/**
3701 * Rotates the stack registers in the push direction.
3702 *
3703 * @param pCtx The CPU context.
3704 * @remarks This is a complete waste of time, but fxsave stores the registers in
3705 * stack order.
3706 */
3707DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
3708{
3709 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
3710 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
3711 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
3712 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
3713 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
3714 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
3715 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
3716 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
3717 pCtx->fpu.aRegs[0].r80 = r80Tmp;
3718}
3719
3720
3721/**
3722 * Rotates the stack registers in the pop direction.
3723 *
3724 * @param pCtx The CPU context.
3725 * @remarks This is a complete waste of time, but fxsave stores the registers in
3726 * stack order.
3727 */
3728DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
3729{
3730 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
3731 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
3732 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
3733 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
3734 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
3735 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
3736 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
3737 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
3738 pCtx->fpu.aRegs[7].r80 = r80Tmp;
3739}
3740
3741
3742/**
3743 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
3744 * exception prevents it.
3745 *
3746 * @param pIemCpu The IEM per CPU data.
3747 * @param pResult The FPU operation result to push.
3748 * @param pCtx The CPU context.
3749 */
3750static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
3751{
3752 /* Update FSW and bail if there are pending exceptions afterwards. */
3753 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3754 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3755 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3756 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3757 {
3758 pCtx->fpu.FSW = fFsw;
3759 return;
3760 }
3761
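    /* TOP is a 3-bit field, so adding 7 decrements it by one modulo 8; the
       decremented slot is where the pushed value goes (the new ST(0)). */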
3762 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3763 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3764 {
3765 /* All is fine, push the actual value. */
3766 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3767 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
3768 }
3769 else if (pCtx->fpu.FCW & X86_FCW_IM)
3770 {
3771 /* Masked stack overflow, push QNaN. */
3772 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3773 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3774 }
3775 else
3776 {
3777 /* Raise stack overflow, don't push anything. */
3778 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3779 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3780 return;
3781 }
3782
3783 fFsw &= ~X86_FSW_TOP_MASK;
3784 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3785 pCtx->fpu.FSW = fFsw;
3786
3787 iemFpuRotateStackPush(pCtx);
3788}
3789
3790
3791/**
3792 * Stores a result in a FPU register and updates the FSW and FTW.
3793 *
3794 * @param pIemCpu The IEM per CPU data.
3795 * @param pResult The result to store.
3796 * @param iStReg Which FPU register to store it in.
3797 * @param pCtx The CPU context.
3798 */
3799static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
3800{
3801 Assert(iStReg < 8);
3802 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
3803 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3804 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
3805 pCtx->fpu.FTW |= RT_BIT(iReg);
3806 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
3807}
3808
3809
3810/**
3811 * Only updates the FPU status word (FSW) with the result of the current
3812 * instruction.
3813 *
3814 * @param pCtx The CPU context.
3815 * @param u16FSW The FSW output of the current instruction.
3816 */
3817static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
3818{
3819 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
3820 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
3821}
3822
3823
3824/**
3825 * Pops one item off the FPU stack if no pending exception prevents it.
3826 *
3827 * @param pCtx The CPU context.
3828 */
3829static void iemFpuMaybePopOne(PCPUMCTX pCtx)
3830{
3831 /* Check pending exceptions. */
3832 uint16_t uFSW = pCtx->fpu.FSW;
3833 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3834 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3835 return;
3836
3837 /* Pop: TOP = TOP + 1 (adding 9 equals adding 1 modulo 8 in the 3-bit field). */
3838 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
3839 uFSW &= ~X86_FSW_TOP_MASK;
3840 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
3841 pCtx->fpu.FSW = uFSW;
3842
3843 /* Mark the previous ST0 as empty. */
3844 iOldTop >>= X86_FSW_TOP_SHIFT;
3845 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
3846
3847 /* Rotate the registers. */
3848 iemFpuRotateStackPop(pCtx);
3849}
3850
3851
3852/**
3853 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
3854 *
3855 * @param pIemCpu The IEM per CPU data.
3856 * @param pResult The FPU operation result to push.
3857 */
3858static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
3859{
3860 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3861 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3862 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3863}
3864
3865
3866/**
3867 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
3868 * and sets FPUDP and FPUDS.
3869 *
3870 * @param pIemCpu The IEM per CPU data.
3871 * @param pResult The FPU operation result to push.
3872 * @param iEffSeg The effective segment register.
3873 * @param GCPtrEff The effective address relative to @a iEffSeg.
3874 */
3875static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3876{
3877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3878 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
3879 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3880 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
3881}
3882
3883
3884/**
3885 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
3886 * unless a pending exception prevents it.
3887 *
3888 * @param pIemCpu The IEM per CPU data.
3889 * @param pResult The FPU operation result to store and push.
3890 */
3891static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
3892{
3893 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3894 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3895
3896 /* Update FSW and bail if there are pending exceptions afterwards. */
3897 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
3898 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
3899 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
3900 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
3901 {
3902 pCtx->fpu.FSW = fFsw;
3903 return;
3904 }
3905
3906 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
3907 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
3908 {
3909 /* All is fine, push the actual value. */
3910 pCtx->fpu.FTW |= RT_BIT(iNewTop);
3911 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
3912 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
3913 }
3914 else if (pCtx->fpu.FCW & X86_FCW_IM)
3915 {
3916 /* Masked stack overflow, push QNaN. */
3917 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
3918 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
3919 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
3920 }
3921 else
3922 {
3923 /* Raise stack overflow, don't push anything. */
3924 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
3925 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
3926 return;
3927 }
3928
3929 fFsw &= ~X86_FSW_TOP_MASK;
3930 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
3931 pCtx->fpu.FSW = fFsw;
3932
3933 iemFpuRotateStackPush(pCtx);
3934}
3935
3936
3937/**
3938 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3939 * FOP.
3940 *
3941 * @param pIemCpu The IEM per CPU data.
3942 * @param pResult The result to store.
3943 * @param iStReg Which FPU register to store it in.
3945 */
3946static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3947{
3948 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3949 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3950 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3951}
3952
3953
3954/**
3955 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
3956 * FOP, and then pops the stack.
3957 *
3958 * @param pIemCpu The IEM per CPU data.
3959 * @param pResult The result to store.
3960 * @param iStReg Which FPU register to store it in.
3962 */
3963static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
3964{
3965 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3966 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3967 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3968 iemFpuMaybePopOne(pCtx);
3969}
3970
3971
3972/**
3973 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3974 * FPUDP, and FPUDS.
3975 *
3976 * @param pIemCpu The IEM per CPU data.
3977 * @param pResult The result to store.
3978 * @param iStReg Which FPU register to store it in.
3980 * @param iEffSeg The effective memory operand selector register.
3981 * @param GCPtrEff The effective memory operand offset.
3982 */
3983static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
3984{
3985 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3986 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
3987 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
3988 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
3989}
3990
3991
3992/**
3993 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
3994 * FPUDP, and FPUDS, and then pops the stack.
3995 *
3996 * @param pIemCpu The IEM per CPU data.
3997 * @param pResult The result to store.
3998 * @param iStReg Which FPU register to store it in.
4000 * @param iEffSeg The effective memory operand selector register.
4001 * @param GCPtrEff The effective memory operand offset.
4002 */
4003static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4004 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4005{
4006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4007 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4008 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4009 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4010 iemFpuMaybePopOne(pCtx);
4011}
4012
4013
4014/**
4015 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4016 *
4017 * @param pIemCpu The IEM per CPU data.
4018 */
4019static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4020{
4021 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4022}
4023
4024
4025/**
4026 * Marks the specified stack register as free (for FFREE).
4027 *
4028 * @param pIemCpu The IEM per CPU data.
4029 * @param iStReg The register to free.
4030 */
4031static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4032{
4033 Assert(iStReg < 8);
4034 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4035 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4036 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4037}
4038
4039
4040/**
4041 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4042 *
4043 * @param pIemCpu The IEM per CPU data.
4044 */
4045static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4046{
4047 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4048 uint16_t uFsw = pCtx->fpu.FSW;
4049 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4050 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4051 uFsw &= ~X86_FSW_TOP_MASK;
4052 uFsw |= uTop;
4053 pCtx->fpu.FSW = uFsw;
4054}
4055
4056
4057/**
4058 * Decrements FSW.TOP, i.e. pushes a new item onto the stack without storing anything in it.
4059 *
4060 * @param pIemCpu The IEM per CPU data.
4061 */
4062static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4063{
4064 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4065 uint16_t uFsw = pCtx->fpu.FSW;
4066 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4067 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4068 uFsw &= ~X86_FSW_TOP_MASK;
4069 uFsw |= uTop;
4070 pCtx->fpu.FSW = uFsw;
4071}
4072
4073
4074/**
4075 * Updates the FSW, FOP, FPUIP, and FPUCS.
4076 *
4077 * @param pIemCpu The IEM per CPU data.
4078 * @param u16FSW The FSW from the current instruction.
4079 */
4080static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4081{
4082 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4083 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4084 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4085}
4086
4087
4088/**
4089 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4090 *
4091 * @param pIemCpu The IEM per CPU data.
4092 * @param u16FSW The FSW from the current instruction.
4093 */
4094static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4095{
4096 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4097 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4098 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4099 iemFpuMaybePopOne(pCtx);
4100}
4101
4102
4103/**
4104 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4105 *
4106 * @param pIemCpu The IEM per CPU data.
4107 * @param u16FSW The FSW from the current instruction.
4108 * @param iEffSeg The effective memory operand selector register.
4109 * @param GCPtrEff The effective memory operand offset.
4110 */
4111static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4112{
4113 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4114 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4115 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4116 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4117}
4118
4119
4120/**
4121 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4122 *
4123 * @param pIemCpu The IEM per CPU data.
4124 * @param u16FSW The FSW from the current instruction.
4125 */
4126static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4127{
4128 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4129 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4130 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4131 iemFpuMaybePopOne(pCtx);
4132 iemFpuMaybePopOne(pCtx);
4133}
4134
4135
4136/**
4137 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4138 *
4139 * @param pIemCpu The IEM per CPU data.
4140 * @param u16FSW The FSW from the current instruction.
4141 * @param iEffSeg The effective memory operand selector register.
4142 * @param GCPtrEff The effective memory operand offset.
4143 */
4144static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4145{
4146 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4147 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4148 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4149 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4150 iemFpuMaybePopOne(pCtx);
4151}
4152
4153
4154/**
4155 * Worker routine for raising an FPU stack underflow exception.
4156 *
4157 * @param pIemCpu The IEM per CPU data.
4158 * @param iStReg The stack register being accessed.
4159 * @param pCtx The CPU context.
4160 */
4161static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4162{
4163 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4164 if (pCtx->fpu.FCW & X86_FCW_IM)
4165 {
4166 /* Masked underflow. */
4167 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4168 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4169 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4170 if (iStReg != UINT8_MAX)
4171 {
4172 pCtx->fpu.FTW |= RT_BIT(iReg);
4173 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4174 }
4175 }
4176 else
4177 {
4178 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4179 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4180 }
4181}
4182
4183
4184/**
4185 * Raises a FPU stack underflow exception.
4186 *
4187 * @param pIemCpu The IEM per CPU data.
4188 * @param iStReg The destination register that should be loaded
4189 * with QNaN if \#IS is masked. Specify
4190 * UINT8_MAX if none (like for fcom).
4191 */
4192DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4193{
4194 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4195 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4196 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4197}
4198
4199
4200DECL_NO_INLINE(static, void)
4201iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4202{
4203 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4204 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4205 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4206 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4207}
4208
4209
4210DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4211{
4212 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4213 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4214 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4215 iemFpuMaybePopOne(pCtx);
4216}
4217
4218
4219DECL_NO_INLINE(static, void)
4220iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4221{
4222 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4223 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4224 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4225 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4226 iemFpuMaybePopOne(pCtx);
4227}
4228
4229
4230DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4231{
4232 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4233 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4234 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4235 iemFpuMaybePopOne(pCtx);
4236 iemFpuMaybePopOne(pCtx);
4237}
4238
4239
4240DECL_NO_INLINE(static, void)
4241iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4242{
4243 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4244 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4245
4246 if (pCtx->fpu.FCW & X86_FCW_IM)
4247 {
4248 /* Masked underflow - push QNaN. */
4249 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4250 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4251 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4252 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4253 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4254 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4255 iemFpuRotateStackPush(pCtx);
4256 }
4257 else
4258 {
4259 /* Exception pending - don't change TOP or the register stack. */
4260 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4261 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4262 }
4263}
4264
4265
4266DECL_NO_INLINE(static, void)
4267iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4268{
4269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4270 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4271
4272 if (pCtx->fpu.FCW & X86_FCW_IM)
4273 {
4274 /* Masked underflow - push QNaN. */
4275 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4276 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4277 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4278 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4279 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4280 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4281 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4282 iemFpuRotateStackPush(pCtx);
4283 }
4284 else
4285 {
4286 /* Exception pending - don't change TOP or the register stack. */
4287 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4288 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4289 }
4290}
4291
4292
4293/**
4294 * Worker routine for raising an FPU stack overflow exception on a push.
4295 *
4296 * @param pIemCpu The IEM per CPU data.
4297 * @param pCtx The CPU context.
4298 */
4299static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4300{
4301 if (pCtx->fpu.FCW & X86_FCW_IM)
4302 {
4303 /* Masked overflow. */
4304 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4305 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4306 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4307 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4308 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4309 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4310 iemFpuRotateStackPush(pCtx);
4311 }
4312 else
4313 {
4314 /* Exception pending - don't change TOP or the register stack. */
4315 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4316 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4317 }
4318}
4319
4320
4321/**
4322 * Raises a FPU stack overflow exception on a push.
4323 *
4324 * @param pIemCpu The IEM per CPU data.
4325 */
4326DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4327{
4328 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4329 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4330 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4331}
4332
4333
4334/**
4335 * Raises a FPU stack overflow exception on a push with a memory operand.
4336 *
4337 * @param pIemCpu The IEM per CPU data.
4338 * @param iEffSeg The effective memory operand selector register.
4339 * @param GCPtrEff The effective memory operand offset.
4340 */
4341DECL_NO_INLINE(static, void)
4342iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4343{
4344 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4345 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4346 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4347 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4348}
4349
4350
4351static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4352{
4353 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4354 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4355 if (pCtx->fpu.FTW & RT_BIT(iReg))
4356 return VINF_SUCCESS;
4357 return VERR_NOT_FOUND;
4358}
4359
4360
4361static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4362{
4363 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4364 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4365 if (pCtx->fpu.FTW & RT_BIT(iReg))
4366 {
4367 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4368 return VINF_SUCCESS;
4369 }
4370 return VERR_NOT_FOUND;
4371}
4372
4373
4374static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4375 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
4376{
4377 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4378 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4379 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4380 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4381 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4382 {
4383 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4384 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
4385 return VINF_SUCCESS;
4386 }
4387 return VERR_NOT_FOUND;
4388}
4389
4390
4391static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
4392{
4393 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4394 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4395 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4396 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4397 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4398 {
4399 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
4400 return VINF_SUCCESS;
4401 }
4402 return VERR_NOT_FOUND;
4403}
4404
4405
4406/**
4407 * Updates the FPU exception status after FCW is changed.
4408 *
4409 * @param pCtx The CPU context.
4410 */
4411static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
4412{
4413 uint16_t u16Fsw = pCtx->fpu.FSW;
4414 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
4415 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4416 else
4417 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4418 pCtx->fpu.FSW = u16Fsw;
4419}
4420
4421
4422/**
4423 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4424 *
4425 * @returns The full FTW.
4426 * @param pCtx The CPU state.
4427 */
4428static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
4429{
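    /* The full tag word has two bits per register: 0 = valid, 1 = zero,
       2 = special (NaN, infinity, denormal, unnormal), 3 = empty. */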
4430 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
4431 uint16_t u16Ftw = 0;
4432 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4433 for (unsigned iSt = 0; iSt < 8; iSt++)
4434 {
4435 unsigned const iReg = (iSt + iTop) & 7;
4436 if (!(u8Ftw & RT_BIT(iReg)))
4437 u16Ftw |= 3 << (iReg * 2); /* empty */
4438 else
4439 {
4440 uint16_t uTag;
4441 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
4442 if (pr80Reg->s.uExponent == 0x7fff)
4443 uTag = 2; /* Exponent is all 1's => Special. */
4444 else if (pr80Reg->s.uExponent == 0x0000)
4445 {
4446 if (pr80Reg->s.u64Mantissa == 0x0000)
4447 uTag = 1; /* All bits are zero => Zero. */
4448 else
4449 uTag = 2; /* Must be special. */
4450 }
4451 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
4452 uTag = 0; /* Valid. */
4453 else
4454 uTag = 2; /* Must be special. */
4455
4456 u16Ftw |= uTag << (iReg * 2);
4457 }
4458 }
4459
4460 return u16Ftw;
4461}
4462
4463
4464/**
4465 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4466 *
4467 * @returns The compressed FTW.
4468 * @param u16FullFtw The full FTW to convert.
4469 */
4470static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
4471{
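    /* The compressed (fxsave style) tag word keeps a single bit per register:
       1 = occupied (any tag other than 3/empty), 0 = empty. */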
4472 uint8_t u8Ftw = 0;
4473 for (unsigned i = 0; i < 8; i++)
4474 {
4475 if ((u16FullFtw & 3) != 3 /*empty*/)
4476 u8Ftw |= RT_BIT(i);
4477 u16FullFtw >>= 2;
4478 }
4479
4480 return u8Ftw;
4481}
4482
4483/** @} */
4484
4485
4486/** @name Memory access.
4487 *
4488 * @{
4489 */
4490
4491
4492/**
4493 * Updates the IEMCPU::cbWritten counter if applicable.
4494 *
4495 * @param pIemCpu The IEM per CPU data.
4496 * @param fAccess The access being accounted for.
4497 * @param cbMem The access size.
4498 */
4499DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
4500{
4501 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
4502 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
4503 pIemCpu->cbWritten += (uint32_t)cbMem;
4504}
4505
4506
4507/**
4508 * Checks if the given segment can be written to, raising the appropriate
4509 * exception if not.
4510 *
4511 * @returns VBox strict status code.
4512 *
4513 * @param pIemCpu The IEM per CPU data.
4514 * @param pHid Pointer to the hidden register.
4515 * @param iSegReg The register number.
4516 */
4517static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4518{
4519 if (!pHid->Attr.n.u1Present)
4520 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4521
4522 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4523 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4524 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4525 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
4526
4527 /** @todo DPL/RPL/CPL? */
4528
4529 return VINF_SUCCESS;
4530}
4531
4532
4533/**
4534 * Checks if the given segment can be read from, raising the appropriate
4535 * exception if not.
4536 *
4537 * @returns VBox strict status code.
4538 *
4539 * @param pIemCpu The IEM per CPU data.
4540 * @param pHid Pointer to the hidden register.
4541 * @param iSegReg The register number.
4542 */
4543static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
4544{
4545 if (!pHid->Attr.n.u1Present)
4546 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
4547
4548 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE
4549 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
4550 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
4551
4552 /** @todo DPL/RPL/CPL? */
4553
4554 return VINF_SUCCESS;
4555}
4556
4557
4558/**
4559 * Applies the segment limit, base and attributes.
4560 *
4561 * This may raise a \#GP or \#SS.
4562 *
4563 * @returns VBox strict status code.
4564 *
4565 * @param pIemCpu The IEM per CPU data.
4566 * @param fAccess The kind of access which is being performed.
4567 * @param iSegReg The index of the segment register to apply.
4568 * This is UINT8_MAX if none (for IDT, GDT, LDT,
4569 * TSS, ++).
 * @param cbMem The size of the memory access.
4570 * @param pGCPtrMem Pointer to the guest memory address to apply
4571 * segmentation to. Input and output parameter.
4572 */
4573static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
4574 size_t cbMem, PRTGCPTR pGCPtrMem)
4575{
4576 if (iSegReg == UINT8_MAX)
4577 return VINF_SUCCESS;
4578
4579 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
4580 switch (pIemCpu->enmCpuMode)
4581 {
4582 case IEMMODE_16BIT:
4583 case IEMMODE_32BIT:
4584 {
4585 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
4586 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
4587
4588 Assert(pSel->Attr.n.u1Present);
4589 Assert(pSel->Attr.n.u1DescType);
4590 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
4591 {
4592 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4593 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4594 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4595
4596 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4597 {
4598 /** @todo CPL check. */
4599 }
4600
4601 /*
4602 * There are two kinds of data selectors, normal and expand down.
4603 */
4604 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
4605 {
4606 if ( GCPtrFirst32 > pSel->u32Limit
4607 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4608 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4609
4610 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4611 }
4612 else
4613 {
4614 /** @todo implement expand down segments. */
4615 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
4616 }
4617 }
4618 else
4619 {
4620
4621 /*
4622 * A code selector can usually be read thru (if marked readable); writing
4623 * is only permitted in real and V8086 mode.
4624 */
4625 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4626 || ( (fAccess & IEM_ACCESS_TYPE_READ)
4627 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
4628 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
4629 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
4630
4631 if ( GCPtrFirst32 > pSel->u32Limit
4632 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
4633 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
4634
4635 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4636 {
4637 /** @todo CPL check. */
4638 }
4639
4640 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
4641 }
4642 return VINF_SUCCESS;
4643 }
4644
4645 case IEMMODE_64BIT:
4646 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
4647 *pGCPtrMem += pSel->u64Base;
4648 return VINF_SUCCESS;
4649
4650 default:
4651 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
4652 }
4653}
4654
4655
4656/**
4657 * Translates a virtual address to a physical address and checks if we
4658 * can access the page as specified.
4659 *
4660 * @param pIemCpu The IEM per CPU data.
4661 * @param GCPtrMem The virtual address.
4662 * @param fAccess The intended access.
4663 * @param pGCPhysMem Where to return the physical address.
4664 */
4665static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
4666 PRTGCPHYS pGCPhysMem)
4667{
4668 /** @todo Need a different PGM interface here. We're currently using
4669 * generic / REM interfaces. this won't cut it for R0 & RC. */
4670 RTGCPHYS GCPhys;
4671 uint64_t fFlags;
4672 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
4673 if (RT_FAILURE(rc))
4674 {
4675 /** @todo Check unassigned memory in unpaged mode. */
4676 /** @todo Reserved bits in page tables. Requires new PGM interface. */
4677 *pGCPhysMem = NIL_RTGCPHYS;
4678 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
4679 }
4680
4681 /* If the page is writable and does not have the no-exec bit set, all
4682 access is allowed. Otherwise we'll have to check more carefully... */
4683 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
4684 {
4685 /* Write to read only memory? */
4686 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
4687 && !(fFlags & X86_PTE_RW)
4688 && ( pIemCpu->uCpl != 0
4689 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
4690 {
4691 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
4692 *pGCPhysMem = NIL_RTGCPHYS;
4693 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
4694 }
4695
4696 /* Kernel memory accessed by userland? */
4697 if ( !(fFlags & X86_PTE_US)
4698 && pIemCpu->uCpl == 3
4699 && !(fAccess & IEM_ACCESS_WHAT_SYS))
4700 {
4701 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
4702 *pGCPhysMem = NIL_RTGCPHYS;
4703 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
4704 }
4705
4706 /* Executing non-executable memory? */
4707 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
4708 && (fFlags & X86_PTE_PAE_NX)
4709 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
4710 {
4711 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
4712 *pGCPhysMem = NIL_RTGCPHYS;
4713 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
4714 VERR_ACCESS_DENIED);
4715 }
4716 }
4717
4718 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
4719 *pGCPhysMem = GCPhys;
4720 return VINF_SUCCESS;
4721}
4722
4723
4724
4725/**
4726 * Maps a physical page.
4727 *
4728 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4729 * @param pIemCpu The IEM per CPU data.
4730 * @param GCPhysMem The physical address.
4731 * @param fAccess The intended access.
4732 * @param ppvMem Where to return the mapping address.
4733 * @param pLock The PGM lock.
4734 */
4735static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
4736{
4737#ifdef IEM_VERIFICATION_MODE_FULL
4738 /* Force the alternative path so we can ignore writes. */
4739 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
4740 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4741#endif
4742#ifdef IEM_LOG_MEMORY_WRITES
4743 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4744 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4745#endif
4746#ifdef IEM_VERIFICATION_MODE_MINIMAL
4747 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4748#endif
4749
4750 /** @todo This API may require some improving later. A private deal with PGM
4751 * regarding locking and unlocking needs to be struck. A couple of TLBs
4752 * living in PGM, but with publicly accessible inlined access methods
4753 * could perhaps be an even better solution. */
4754 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
4755 GCPhysMem,
4756 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4757 pIemCpu->fBypassHandlers,
4758 ppvMem,
4759 pLock);
4760 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4761 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4762 return rc;
4763}
4764
4765
4766/**
4767 * Unmaps a page previously mapped by iemMemPageMap.
4768 *
4769 * @param pIemCpu The IEM per CPU data.
4770 * @param GCPhysMem The physical address.
4771 * @param fAccess The intended access.
4772 * @param pvMem What iemMemPageMap returned.
4773 * @param pLock The PGM lock.
4774 */
4775DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
4776{
4777 NOREF(pIemCpu);
4778 NOREF(GCPhysMem);
4779 NOREF(fAccess);
4780 NOREF(pvMem);
4781 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
4782}
4783
4784
4785/**
4786 * Looks up a memory mapping entry.
4787 *
4788 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
4789 * @param pIemCpu The IEM per CPU data.
4790 * @param pvMem The memory address.
4791 * @param fAccess The access to.
4792 */
4793DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
4794{
4795 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
4796 if ( pIemCpu->aMemMappings[0].pv == pvMem
4797 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4798 return 0;
4799 if ( pIemCpu->aMemMappings[1].pv == pvMem
4800 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4801 return 1;
4802 if ( pIemCpu->aMemMappings[2].pv == pvMem
4803 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
4804 return 2;
4805 return VERR_NOT_FOUND;
4806}
4807
4808
4809/**
4810 * Finds a free memmap entry when using iNextMapping doesn't work.
4811 *
4812 * @returns Memory mapping index, 1024 on failure.
4813 * @param pIemCpu The IEM per CPU data.
4814 */
4815static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
4816{
4817 /*
4818 * The easy case.
4819 */
4820 if (pIemCpu->cActiveMappings == 0)
4821 {
4822 pIemCpu->iNextMapping = 1;
4823 return 0;
4824 }
4825
4826 /* There should be enough mappings for all instructions. */
4827 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
4828
4829 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
4830 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
4831 return i;
4832
4833 AssertFailedReturn(1024);
4834}
4835
4836
4837/**
4838 * Commits a bounce buffer that needs writing back and unmaps it.
4839 *
4840 * @returns Strict VBox status code.
4841 * @param pIemCpu The IEM per CPU data.
4842 * @param iMemMap The index of the buffer to commit.
4843 */
4844static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
4845{
4846 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4847 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4848
4849 /*
4850 * Do the writing.
4851 */
4852 int rc;
4853#ifndef IEM_VERIFICATION_MODE_MINIMAL
4854 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
4855 && !IEM_VERIFICATION_ENABLED(pIemCpu))
4856 {
4857 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4858 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4859 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
4860 if (!pIemCpu->fBypassHandlers)
4861 {
4862 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4863 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4864 pbBuf,
4865 cbFirst);
4866 if (cbSecond && rc == VINF_SUCCESS)
4867 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
4868 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4869 pbBuf + cbFirst,
4870 cbSecond);
4871 }
4872 else
4873 {
4874 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4875 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4876 pbBuf,
4877 cbFirst);
4878 if (cbSecond && rc == VINF_SUCCESS)
4879 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
4880 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4881 pbBuf + cbFirst,
4882 cbSecond);
4883 }
4884 if (rc != VINF_SUCCESS)
4885 {
4886 /** @todo status code handling */
4887 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
4888 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
4889 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
4890 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
4891 }
4892 }
4893 else
4894#endif
4895 rc = VINF_SUCCESS;
4896
4897#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
4898 /*
4899 * Record the write(s).
4900 */
4901 if (!pIemCpu->fNoRem)
4902 {
4903 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
4904 if (pEvtRec)
4905 {
4906 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4907 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
4908 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
4909 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
4910 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
4911 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4912 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4913 }
4914 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4915 {
4916 pEvtRec = iemVerifyAllocRecord(pIemCpu);
4917 if (pEvtRec)
4918 {
4919 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
4920 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
4921 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4922 memcpy(pEvtRec->u.RamWrite.ab,
4923 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
4924 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
4925 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
4926 *pIemCpu->ppIemEvtRecNext = pEvtRec;
4927 }
4928 }
4929 }
4930#endif
4931#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
4932 if (rc == VINF_SUCCESS)
4933 {
4934 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
4935 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
4936 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
4937 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
4938 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
4939 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
4940
4941 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
4942 g_cbIemWrote = cbWrote;
4943 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
4944 }
4945#endif
4946
4947 /*
4948 * Free the mapping entry.
4949 */
4950 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4951 Assert(pIemCpu->cActiveMappings != 0);
4952 pIemCpu->cActiveMappings--;
4953 return rc;
4954}
4955
4956
4957/**
4958 * iemMemMap worker that deals with a request crossing pages.
4959 */
4960static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
4961 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
4962{
4963 /*
4964 * Do the address translations.
4965 */
4966 RTGCPHYS GCPhysFirst;
4967 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
4968 if (rcStrict != VINF_SUCCESS)
4969 return rcStrict;
4970
4971/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
4972 * last byte. */
4973 RTGCPHYS GCPhysSecond;
4974 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
4975 if (rcStrict != VINF_SUCCESS)
4976 return rcStrict;
4977 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
4978
4979 /*
4980 * Read in the current memory content if it's a read, execute or partial
4981 * write access.
4982 */
4983 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
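    /* Split the access at the page boundary: the first chunk runs to the end
       of the first page and the remainder starts at offset 0 of the second page. */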
4984 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
4985 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
4986
4987 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
4988 {
4989 int rc;
4990 if (!pIemCpu->fBypassHandlers)
4991 {
4992 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
4993 if (rc != VINF_SUCCESS)
4994 {
4995 /** @todo status code handling */
4996 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
4997 return rc;
4998 }
4999 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5000 if (rc != VINF_SUCCESS)
5001 {
5002 /** @todo status code handling */
5003 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5004 return rc;
5005 }
5006 }
5007 else
5008 {
5009 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5010 if (rc != VINF_SUCCESS)
5011 {
5012 /** @todo status code handling */
5013 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5014 return rc;
5015 }
5016 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5017 if (rc != VINF_SUCCESS)
5018 {
5019 /** @todo status code handling */
5020 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5021 return rc;
5022 }
5023 }
5024
5025#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5026 if ( !pIemCpu->fNoRem
5027 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5028 {
5029 /*
5030 * Record the reads.
5031 */
5032 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5033 if (pEvtRec)
5034 {
5035 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5036 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5037 pEvtRec->u.RamRead.cb = cbFirstPage;
5038 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5039 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5040 }
5041 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5042 if (pEvtRec)
5043 {
5044 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5045 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5046 pEvtRec->u.RamRead.cb = cbSecondPage;
5047 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5048 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5049 }
5050 }
5051#endif
5052 }
5053#ifdef VBOX_STRICT
5054 else
5055 memset(pbBuf, 0xcc, cbMem);
5056#endif
5057#ifdef VBOX_STRICT
5058 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5059 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5060#endif
5061
5062 /*
5063 * Commit the bounce buffer entry.
5064 */
5065 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5066 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5067 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5068 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5069 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5070 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5071 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5072 pIemCpu->cActiveMappings++;
5073
5074 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5075 *ppvMem = pbBuf;
5076 return VINF_SUCCESS;
5077}
5078
5079
5080/**
5081 * iemMemMap worker that deals with iemMemPageMap failures.
5082 */
5083static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5084 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5085{
5086 /*
5087 * Filter out the conditions we cannot handle and the ones which shouldn't happen.
5088 */
5089 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5090 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5091 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5092 {
5093 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5094 return rcMap;
5095 }
5096 pIemCpu->cPotentialExits++;
5097
5098 /*
5099 * Read in the current memory content if it's a read, execute or partial
5100 * write access.
5101 */
5102 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5103 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5104 {
5105 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5106 memset(pbBuf, 0xff, cbMem);
5107 else
5108 {
5109 int rc;
5110 if (!pIemCpu->fBypassHandlers)
5111 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5112 else
5113 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5114 if (rc != VINF_SUCCESS)
5115 {
5116 /** @todo status code handling */
5117 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5118 pIemCpu->fBypassHandlers ? "PGMPhysSimpleReadGCPhys" : "PGMPhysRead", GCPhysFirst, rc));
5119 return rc;
5120 }
5121 }
5122
5123#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5124 if ( !pIemCpu->fNoRem
5125 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5126 {
5127 /*
5128 * Record the read.
5129 */
5130 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5131 if (pEvtRec)
5132 {
5133 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5134 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5135 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5136 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5137 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5138 }
5139 }
5140#endif
5141 }
5142#ifdef VBOX_STRICT
5143 else
5144 memset(pbBuf, 0xcc, cbMem);
5145#endif
5146#ifdef VBOX_STRICT
5147 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5148 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5149#endif
5150
5151 /*
5152 * Commit the bounce buffer entry.
5153 */
5154 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5155 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5156 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5157 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5158 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5159 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5160 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5161 pIemCpu->cActiveMappings++;
5162
5163 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5164 *ppvMem = pbBuf;
5165 return VINF_SUCCESS;
5166}
5167
5168
5169
5170/**
5171 * Maps the specified guest memory for the given kind of access.
5172 *
5173 * This may be using bounce buffering of the memory if it's crossing a page
5174 * boundary or if there is an access handler installed for any of it. Because
5175 * of lock prefix guarantees, we're in for some extra clutter when this
5176 * happens.
5177 *
5178 * This may raise a \#GP, \#SS, \#PF or \#AC.
5179 *
5180 * @returns VBox strict status code.
5181 *
5182 * @param pIemCpu The IEM per CPU data.
5183 * @param ppvMem Where to return the pointer to the mapped
5184 * memory.
5185 * @param cbMem The number of bytes to map. This is usually 1,
5186 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5187 * string operations it can be up to a page.
5188 * @param iSegReg The index of the segment register to use for
5189 * this access. The base and limits are checked.
5190 * Use UINT8_MAX to indicate that no segmentation
5191 * is required (for IDT, GDT and LDT accesses).
5192 * @param GCPtrMem The address of the guest memory.
5193 * @param fAccess How the memory is being accessed. The
5194 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5195 * how to map the memory, while the
5196 * IEM_ACCESS_WHAT_XXX bit is used when raising
5197 * exceptions.
5198 */
5199static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5200{
5201 /*
5202 * Check the input and figure out which mapping entry to use.
5203 */
5204 Assert(cbMem <= 32 || cbMem == 512 || cbMem == 108 || cbMem == 94);
5205 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5206
5207 unsigned iMemMap = pIemCpu->iNextMapping;
5208 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))
5209 {
5210 iMemMap = iemMemMapFindFree(pIemCpu);
5211 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5212 }
5213
5214 /*
5215 * Map the memory, checking that we can actually access it. If something
5216 * slightly complicated happens, fall back on bounce buffering.
5217 */
5218 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5219 if (rcStrict != VINF_SUCCESS)
5220 return rcStrict;
5221
5222 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5223 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5224
5225 RTGCPHYS GCPhysFirst;
5226 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5227 if (rcStrict != VINF_SUCCESS)
5228 return rcStrict;
5229
5230 void *pvMem;
5231 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5232 if (rcStrict != VINF_SUCCESS)
5233 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5234
5235 /*
5236 * Fill in the mapping table entry.
5237 */
5238 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5239 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5240 pIemCpu->iNextMapping = iMemMap + 1;
5241 pIemCpu->cActiveMappings++;
5242
5243 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5244 *ppvMem = pvMem;
5245 return VINF_SUCCESS;
5246}
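/* Editorial sketch (not part of the original sources): a minimal illustration
 * of the iemMemMap / iemMemCommitAndUnmap pattern for a read-modify-write
 * access.  The helper name is made up and IEM_ACCESS_DATA_RW is assumed to be
 * the usual read+write data access constant; kept inside '#if 0' so it is
 * never built.
 */
#if 0
static VBOXSTRICTRC iemMemExampleIncU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t *pu16;
    /* Map two bytes for read+write; this may fall back on bounce buffering. */
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16, sizeof(*pu16), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16 += 1; /* modify the mapped (possibly bounce buffered) guest word */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif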
5247
5248
5249/**
5250 * Commits the guest memory if bounce buffered and unmaps it.
5251 *
5252 * @returns Strict VBox status code.
5253 * @param pIemCpu The IEM per CPU data.
5254 * @param pvMem The mapping.
5255 * @param fAccess The kind of access.
5256 */
5257static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5258{
5259 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5260 AssertReturn(iMemMap >= 0, iMemMap);
5261
5262 /* If it's bounce buffered, we may need to write back the buffer. */
5263 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5264 {
5265 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5266 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5267 }
5268 /* Otherwise unlock it. */
5269 else
5270 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5271
5272 /* Free the entry. */
5273 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5274 Assert(pIemCpu->cActiveMappings != 0);
5275 pIemCpu->cActiveMappings--;
5276 return VINF_SUCCESS;
5277}
5278
5279
5280/**
5281 * Fetches a data byte.
5282 *
5283 * @returns Strict VBox status code.
5284 * @param pIemCpu The IEM per CPU data.
5285 * @param pu8Dst Where to return the byte.
5286 * @param iSegReg The index of the segment register to use for
5287 * this access. The base and limits are checked.
5288 * @param GCPtrMem The address of the guest memory.
5289 */
5290static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5291{
5292 /* The lazy approach for now... */
5293 uint8_t const *pu8Src;
5294 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5295 if (rc == VINF_SUCCESS)
5296 {
5297 *pu8Dst = *pu8Src;
5298 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5299 }
5300 return rc;
5301}
5302
5303
5304/**
5305 * Fetches a data word.
5306 *
5307 * @returns Strict VBox status code.
5308 * @param pIemCpu The IEM per CPU data.
5309 * @param pu16Dst Where to return the word.
5310 * @param iSegReg The index of the segment register to use for
5311 * this access. The base and limits are checked.
5312 * @param GCPtrMem The address of the guest memory.
5313 */
5314static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5315{
5316 /* The lazy approach for now... */
5317 uint16_t const *pu16Src;
5318 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5319 if (rc == VINF_SUCCESS)
5320 {
5321 *pu16Dst = *pu16Src;
5322 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
5323 }
5324 return rc;
5325}
5326
5327
5328/**
5329 * Fetches a data dword.
5330 *
5331 * @returns Strict VBox status code.
5332 * @param pIemCpu The IEM per CPU data.
5333 * @param pu32Dst Where to return the dword.
5334 * @param iSegReg The index of the segment register to use for
5335 * this access. The base and limits are checked.
5336 * @param GCPtrMem The address of the guest memory.
5337 */
5338static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5339{
5340 /* The lazy approach for now... */
5341 uint32_t const *pu32Src;
5342 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5343 if (rc == VINF_SUCCESS)
5344 {
5345 *pu32Dst = *pu32Src;
5346 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
5347 }
5348 return rc;
5349}
5350
5351
5352#ifdef SOME_UNUSED_FUNCTION
5353/**
5354 * Fetches a data dword and sign extends it to a qword.
5355 *
5356 * @returns Strict VBox status code.
5357 * @param pIemCpu The IEM per CPU data.
5358 * @param pu64Dst Where to return the sign extended value.
5359 * @param iSegReg The index of the segment register to use for
5360 * this access. The base and limits are checked.
5361 * @param GCPtrMem The address of the guest memory.
5362 */
5363static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5364{
5365 /* The lazy approach for now... */
5366 int32_t const *pi32Src;
5367 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5368 if (rc == VINF_SUCCESS)
5369 {
5370 *pu64Dst = *pi32Src;
5371 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
5372 }
5373#ifdef __GNUC__ /* warning: GCC may be a royal pain */
5374 else
5375 *pu64Dst = 0;
5376#endif
5377 return rc;
5378}
5379#endif
5380
5381
5382/**
5383 * Fetches a data qword.
5384 *
5385 * @returns Strict VBox status code.
5386 * @param pIemCpu The IEM per CPU data.
5387 * @param pu64Dst Where to return the qword.
5388 * @param iSegReg The index of the segment register to use for
5389 * this access. The base and limits are checked.
5390 * @param GCPtrMem The address of the guest memory.
5391 */
5392static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5393{
5394 /* The lazy approach for now... */
5395 uint64_t const *pu64Src;
5396 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5397 if (rc == VINF_SUCCESS)
5398 {
5399 *pu64Dst = *pu64Src;
5400 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
5401 }
5402 return rc;
5403}
5404
5405
5406/**
5407 * Fetches a data tword.
5408 *
5409 * @returns Strict VBox status code.
5410 * @param pIemCpu The IEM per CPU data.
5411 * @param pr80Dst Where to return the tword.
5412 * @param iSegReg The index of the segment register to use for
5413 * this access. The base and limits are checked.
5414 * @param GCPtrMem The address of the guest memory.
5415 */
5416static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
5417{
5418 /* The lazy approach for now... */
5419 PCRTFLOAT80U pr80Src;
5420 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
5421 if (rc == VINF_SUCCESS)
5422 {
5423 *pr80Dst = *pr80Src;
5424 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
5425 }
5426 return rc;
5427}
5428
5429
5430/**
5431 * Fetches a descriptor register (lgdt, lidt).
5432 *
5433 * @returns Strict VBox status code.
5434 * @param pIemCpu The IEM per CPU data.
5435 * @param pcbLimit Where to return the limit.
5436 * @param pGCPtrBase Where to return the base.
5437 * @param iSegReg The index of the segment register to use for
5438 * this access. The base and limits are checked.
5439 * @param GCPtrMem The address of the guest memory.
5440 * @param enmOpSize The effective operand size.
5441 */
5442static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
5443 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5444{
5445 uint8_t const *pu8Src;
5446 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5447 (void **)&pu8Src,
5448 enmOpSize == IEMMODE_64BIT
5449 ? 2 + 8
5450 : enmOpSize == IEMMODE_32BIT
5451 ? 2 + 4
5452 : 2 + 3,
5453 iSegReg,
5454 GCPtrMem,
5455 IEM_ACCESS_DATA_R);
5456 if (rcStrict == VINF_SUCCESS)
5457 {
5458 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
5459 switch (enmOpSize)
5460 {
5461 case IEMMODE_16BIT:
5462 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
5463 break;
5464 case IEMMODE_32BIT:
5465 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
5466 break;
5467 case IEMMODE_64BIT:
5468 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
5469 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
5470 break;
5471
5472 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5473 }
5474 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
5475 }
5476 return rcStrict;
5477}
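/* Editorial sketch (not in the original sources): how a 32-bit lgdt-style
 * operand (2 byte limit + 4 byte base) could be fetched with the helper
 * above.  The function name is made up; kept inside '#if 0'.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchGdtrOperand(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, IEMMODE_32BIT);
    if (rcStrict == VINF_SUCCESS)
        Log(("example: limit=%#x base=%RGv\n", cbLimit, GCPtrBase));
    return rcStrict;
}
#endif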
5478
5479
5480
5481/**
5482 * Stores a data byte.
5483 *
5484 * @returns Strict VBox status code.
5485 * @param pIemCpu The IEM per CPU data.
5486 * @param iSegReg The index of the segment register to use for
5487 * this access. The base and limits are checked.
5488 * @param GCPtrMem The address of the guest memory.
5489 * @param u8Value The value to store.
5490 */
5491static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
5492{
5493 /* The lazy approach for now... */
5494 uint8_t *pu8Dst;
5495 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5496 if (rc == VINF_SUCCESS)
5497 {
5498 *pu8Dst = u8Value;
5499 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
5500 }
5501 return rc;
5502}
5503
5504
5505/**
5506 * Stores a data word.
5507 *
5508 * @returns Strict VBox status code.
5509 * @param pIemCpu The IEM per CPU data.
5510 * @param iSegReg The index of the segment register to use for
5511 * this access. The base and limits are checked.
5512 * @param GCPtrMem The address of the guest memory.
5513 * @param u16Value The value to store.
5514 */
5515static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
5516{
5517 /* The lazy approach for now... */
5518 uint16_t *pu16Dst;
5519 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5520 if (rc == VINF_SUCCESS)
5521 {
5522 *pu16Dst = u16Value;
5523 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
5524 }
5525 return rc;
5526}
5527
5528
5529/**
5530 * Stores a data dword.
5531 *
5532 * @returns Strict VBox status code.
5533 * @param pIemCpu The IEM per CPU data.
5534 * @param iSegReg The index of the segment register to use for
5535 * this access. The base and limits are checked.
5536 * @param GCPtrMem The address of the guest memory.
5537 * @param u32Value The value to store.
5538 */
5539static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
5540{
5541 /* The lazy approach for now... */
5542 uint32_t *pu32Dst;
5543 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5544 if (rc == VINF_SUCCESS)
5545 {
5546 *pu32Dst = u32Value;
5547 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
5548 }
5549 return rc;
5550}
5551
5552
5553/**
5554 * Stores a data qword.
5555 *
5556 * @returns Strict VBox status code.
5557 * @param pIemCpu The IEM per CPU data.
5558 * @param iSegReg The index of the segment register to use for
5559 * this access. The base and limits are checked.
5560 * @param GCPtrMem The address of the guest memory.
5561 * @param u64Value The value to store.
5562 */
5563static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
5564{
5565 /* The lazy approach for now... */
5566 uint64_t *pu64Dst;
5567 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
5568 if (rc == VINF_SUCCESS)
5569 {
5570 *pu64Dst = u64Value;
5571 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
5572 }
5573 return rc;
5574}
5575
5576
5577/**
5578 * Stores a descriptor register (sgdt, sidt).
5579 *
5580 * @returns Strict VBox status code.
5581 * @param pIemCpu The IEM per CPU data.
5582 * @param cbLimit The limit.
5583 * @param GCPtrBase The base address.
5584 * @param iSegReg The index of the segment register to use for
5585 * this access. The base and limits are checked.
5586 * @param GCPtrMem The address of the guest memory.
5587 * @param enmOpSize The effective operand size.
5588 */
5589static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
5590 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
5591{
5592 uint8_t *pu8Src;
5593 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
5594 (void **)&pu8Src,
5595 enmOpSize == IEMMODE_64BIT
5596 ? 2 + 8
5597 : enmOpSize == IEMMODE_32BIT
5598 ? 2 + 4
5599 : 2 + 4, /* 16-bit stores still write 6 bytes; byte 5 is zeroed below. */
5600 iSegReg,
5601 GCPtrMem,
5602 IEM_ACCESS_DATA_W);
5603 if (rcStrict == VINF_SUCCESS)
5604 {
5605 pu8Src[0] = RT_BYTE1(cbLimit);
5606 pu8Src[1] = RT_BYTE2(cbLimit);
5607 pu8Src[2] = RT_BYTE1(GCPtrBase);
5608 pu8Src[3] = RT_BYTE2(GCPtrBase);
5609 pu8Src[4] = RT_BYTE3(GCPtrBase);
5610 if (enmOpSize == IEMMODE_16BIT)
5611 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
5612 else
5613 {
5614 pu8Src[5] = RT_BYTE4(GCPtrBase);
5615 if (enmOpSize == IEMMODE_64BIT)
5616 {
5617 pu8Src[6] = RT_BYTE5(GCPtrBase);
5618 pu8Src[7] = RT_BYTE6(GCPtrBase);
5619 pu8Src[8] = RT_BYTE7(GCPtrBase);
5620 pu8Src[9] = RT_BYTE8(GCPtrBase);
5621 }
5622 }
5623 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
5624 }
5625 return rcStrict;
5626}
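/* Worked layout example (editorial note, not in the original sources): sgdt
 * with a 32-bit operand, limit 0x027f and base 0x00123456 stores the bytes
 *      7f 02 56 34 12 00
 * i.e. the limit first, then the base little endian; with a 16-bit operand
 * the last byte is forced to zero as noted above. */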
5627
5628
5629/**
5630 * Pushes a word onto the stack.
5631 *
5632 * @returns Strict VBox status code.
5633 * @param pIemCpu The IEM per CPU data.
5634 * @param u16Value The value to push.
5635 */
5636static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
5637{
5638 /* Decrement the stack pointer. */
5639 uint64_t uNewRsp;
5640 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5641 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);
5642
5643 /* Write the word the lazy way. */
5644 uint16_t *pu16Dst;
5645 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5646 if (rc == VINF_SUCCESS)
5647 {
5648 *pu16Dst = u16Value;
5649 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5650 }
5651
5652 /* Commit the new RSP value unless an access handler made trouble. */
5653 if (rc == VINF_SUCCESS)
5654 pCtx->rsp = uNewRsp;
5655
5656 return rc;
5657}
5658
5659
5660/**
5661 * Pushes a dword onto the stack.
5662 *
5663 * @returns Strict VBox status code.
5664 * @param pIemCpu The IEM per CPU data.
5665 * @param u32Value The value to push.
5666 */
5667static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
5668{
5669 /* Decrement the stack pointer. */
5670 uint64_t uNewRsp;
5671 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5672 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);
5673
5674 /* Write the dword the lazy way. */
5675 uint32_t *pu32Dst;
5676 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5677 if (rc == VINF_SUCCESS)
5678 {
5679 *pu32Dst = u32Value;
5680 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5681 }
5682
5683 /* Commit the new RSP value unless an access handler made trouble. */
5684 if (rc == VINF_SUCCESS)
5685 pCtx->rsp = uNewRsp;
5686
5687 return rc;
5688}
5689
5690
5691/**
5692 * Pushes a qword onto the stack.
5693 *
5694 * @returns Strict VBox status code.
5695 * @param pIemCpu The IEM per CPU data.
5696 * @param u64Value The value to push.
5697 */
5698static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
5699{
5700 /* Decrement the stack pointer. */
5701 uint64_t uNewRsp;
5702 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5703 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);
5704
5705 /* Write the qword the lazy way. */
5706 uint64_t *pu64Dst;
5707 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5708 if (rc == VINF_SUCCESS)
5709 {
5710 *pu64Dst = u64Value;
5711 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5712 }
5713
5714 /* Commit the new RSP value unless an access handler made trouble. */
5715 if (rc == VINF_SUCCESS)
5716 pCtx->rsp = uNewRsp;
5717
5718 return rc;
5719}
5720
5721
5722/**
5723 * Pops a word from the stack.
5724 *
5725 * @returns Strict VBox status code.
5726 * @param pIemCpu The IEM per CPU data.
5727 * @param pu16Value Where to store the popped value.
5728 */
5729static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
5730{
5731 /* Increment the stack pointer. */
5732 uint64_t uNewRsp;
5733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5734 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);
5735
5736 /* Read the word the lazy way. */
5737 uint16_t const *pu16Src;
5738 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5739 if (rc == VINF_SUCCESS)
5740 {
5741 *pu16Value = *pu16Src;
5742 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5743
5744 /* Commit the new RSP value. */
5745 if (rc == VINF_SUCCESS)
5746 pCtx->rsp = uNewRsp;
5747 }
5748
5749 return rc;
5750}
5751
5752
5753/**
5754 * Pops a dword from the stack.
5755 *
5756 * @returns Strict VBox status code.
5757 * @param pIemCpu The IEM per CPU data.
5758 * @param pu32Value Where to store the popped value.
5759 */
5760static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
5761{
5762 /* Increment the stack pointer. */
5763 uint64_t uNewRsp;
5764 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5765 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);
5766
5767 /* Read the dword the lazy way. */
5768 uint32_t const *pu32Src;
5769 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5770 if (rc == VINF_SUCCESS)
5771 {
5772 *pu32Value = *pu32Src;
5773 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5774
5775 /* Commit the new RSP value. */
5776 if (rc == VINF_SUCCESS)
5777 pCtx->rsp = uNewRsp;
5778 }
5779
5780 return rc;
5781}
5782
5783
5784/**
5785 * Pops a qword from the stack.
5786 *
5787 * @returns Strict VBox status code.
5788 * @param pIemCpu The IEM per CPU data.
5789 * @param pu64Value Where to store the popped value.
5790 */
5791static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
5792{
5793 /* Increment the stack pointer. */
5794 uint64_t uNewRsp;
5795 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5796 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);
5797
5798 /* Read the qword the lazy way. */
5799 uint64_t const *pu64Src;
5800 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5801 if (rc == VINF_SUCCESS)
5802 {
5803 *pu64Value = *pu64Src;
5804 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5805
5806 /* Commit the new RSP value. */
5807 if (rc == VINF_SUCCESS)
5808 pCtx->rsp = uNewRsp;
5809 }
5810
5811 return rc;
5812}
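/* Editorial sketch (not in the original sources): a push/pop round trip using
 * the helpers above.  RSP is only committed when the memory access succeeded,
 * so a faulting push leaves the register untouched.  Kept inside '#if 0'.
 */
#if 0
static VBOXSTRICTRC iemExampleStackRoundTrip(PIEMCPU pIemCpu)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, UINT32_C(0xdeadbeef));
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value;
        rcStrict = iemMemStackPopU32(pIemCpu, &u32Value);
        /* On success u32Value == 0xdeadbeef and RSP is back where it started. */
    }
    return rcStrict;
}
#endif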
5813
5814
5815/**
5816 * Pushes a word onto the stack, using a temporary stack pointer.
5817 *
5818 * @returns Strict VBox status code.
5819 * @param pIemCpu The IEM per CPU data.
5820 * @param u16Value The value to push.
5821 * @param pTmpRsp Pointer to the temporary stack pointer.
5822 */
5823static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
5824{
5825 /* Decrement the stack pointer. */
5826 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5827 RTUINT64U NewRsp = *pTmpRsp;
5828 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);
5829
5830 /* Write the word the lazy way. */
5831 uint16_t *pu16Dst;
5832 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5833 if (rc == VINF_SUCCESS)
5834 {
5835 *pu16Dst = u16Value;
5836 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
5837 }
5838
5839 /* Commit the new RSP value unless an access handler made trouble. */
5840 if (rc == VINF_SUCCESS)
5841 *pTmpRsp = NewRsp;
5842
5843 return rc;
5844}
5845
5846
5847/**
5848 * Pushes a dword onto the stack, using a temporary stack pointer.
5849 *
5850 * @returns Strict VBox status code.
5851 * @param pIemCpu The IEM per CPU data.
5852 * @param u32Value The value to push.
5853 * @param pTmpRsp Pointer to the temporary stack pointer.
5854 */
5855static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
5856{
5857 /* Decrement the stack pointer. */
5858 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5859 RTUINT64U NewRsp = *pTmpRsp;
5860 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);
5861
5862 /* Write the dword the lazy way. */
5863 uint32_t *pu32Dst;
5864 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5865 if (rc == VINF_SUCCESS)
5866 {
5867 *pu32Dst = u32Value;
5868 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
5869 }
5870
5871 /* Commit the new RSP value unless an access handler made trouble. */
5872 if (rc == VINF_SUCCESS)
5873 *pTmpRsp = NewRsp;
5874
5875 return rc;
5876}
5877
5878
5879/**
5880 * Pushes a qword onto the stack, using a temporary stack pointer.
5881 *
5882 * @returns Strict VBox status code.
5883 * @param pIemCpu The IEM per CPU data.
5884 * @param u64Value The value to push.
5885 * @param pTmpRsp Pointer to the temporary stack pointer.
5886 */
5887static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
5888{
5889 /* Decrement the stack pointer. */
5890 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5891 RTUINT64U NewRsp = *pTmpRsp;
5892 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);
5893
5894 /* Write the qword the lazy way. */
5895 uint64_t *pu64Dst;
5896 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
5897 if (rc == VINF_SUCCESS)
5898 {
5899 *pu64Dst = u64Value;
5900 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
5901 }
5902
5903 /* Commit the new RSP value unless an access handler made trouble. */
5904 if (rc == VINF_SUCCESS)
5905 *pTmpRsp = NewRsp;
5906
5907 return rc;
5908}
5909
5910
5911/**
5912 * Pops a word from the stack, using a temporary stack pointer.
5913 *
5914 * @returns Strict VBox status code.
5915 * @param pIemCpu The IEM per CPU data.
5916 * @param pu16Value Where to store the popped value.
5917 * @param pTmpRsp Pointer to the temporary stack pointer.
5918 */
5919static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
5920{
5921 /* Increment the stack pointer. */
5922 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5923 RTUINT64U NewRsp = *pTmpRsp;
5924 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);
5925
5926 /* Read the word the lazy way. */
5927 uint16_t const *pu16Src;
5928 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5929 if (rc == VINF_SUCCESS)
5930 {
5931 *pu16Value = *pu16Src;
5932 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
5933
5934 /* Commit the new RSP value. */
5935 if (rc == VINF_SUCCESS)
5936 *pTmpRsp = NewRsp;
5937 }
5938
5939 return rc;
5940}
5941
5942
5943/**
5944 * Pops a dword from the stack, using a temporary stack pointer.
5945 *
5946 * @returns Strict VBox status code.
5947 * @param pIemCpu The IEM per CPU data.
5948 * @param pu32Value Where to store the popped value.
5949 * @param pTmpRsp Pointer to the temporary stack pointer.
5950 */
5951static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
5952{
5953 /* Increment the stack pointer. */
5954 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5955 RTUINT64U NewRsp = *pTmpRsp;
5956 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);
5957
5958 /* Read the dword the lazy way. */
5959 uint32_t const *pu32Src;
5960 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5961 if (rc == VINF_SUCCESS)
5962 {
5963 *pu32Value = *pu32Src;
5964 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
5965
5966 /* Commit the new RSP value. */
5967 if (rc == VINF_SUCCESS)
5968 *pTmpRsp = NewRsp;
5969 }
5970
5971 return rc;
5972}
5973
5974
5975/**
5976 * Pops a qword from the stack, using a temporary stack pointer.
5977 *
5978 * @returns Strict VBox status code.
5979 * @param pIemCpu The IEM per CPU data.
5980 * @param pu64Value Where to store the popped value.
5981 * @param pTmpRsp Pointer to the temporary stack pointer.
5982 */
5983static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
5984{
5985 /* Increment the stack pointer. */
5986 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5987 RTUINT64U NewRsp = *pTmpRsp;
5988 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
5989
5990 /* Read the qword the lazy way. */
5991 uint64_t const *pu64Src;
5992 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
5993 if (rcStrict == VINF_SUCCESS)
5994 {
5995 *pu64Value = *pu64Src;
5996 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
5997
5998 /* Commit the new RSP value. */
5999 if (rcStrict == VINF_SUCCESS)
6000 *pTmpRsp = NewRsp;
6001 }
6002
6003 return rcStrict;
6004}
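/* Editorial sketch (not in the original sources): the temporary RSP variants
 * let a caller perform several stack accesses and commit RSP only once at the
 * end, so a fault in the middle leaves the guest RSP unchanged.  Kept inside
 * '#if 0'.
 */
#if 0
static VBOXSTRICTRC iemExamplePushTwoWords(PIEMCPU pIemCpu, uint16_t u16First, uint16_t u16Second)
{
    PCPUMCTX  pCtx = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pIemCpu, u16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u; /* commit only after both pushes succeeded */
    return rcStrict;
}
#endif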
6005
6006
6007/**
6008 * Begin a special stack push (used by interrupts, exceptions and such).
6009 *
6010 * This will raise \#SS or \#PF if appropriate.
6011 *
6012 * @returns Strict VBox status code.
6013 * @param pIemCpu The IEM per CPU data.
6014 * @param cbMem The number of bytes to push onto the stack.
6015 * @param ppvMem Where to return the pointer to the stack memory.
6016 * As with the other memory functions this could be
6017 * direct access or bounce buffered access, so
6018 * don't commit any registers until the commit call
6019 * succeeds.
6020 * @param puNewRsp Where to return the new RSP value. This must be
6021 * passed unchanged to
6022 * iemMemStackPushCommitSpecial().
6023 */
6024static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6025{
6026 Assert(cbMem < UINT8_MAX);
6027 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6028 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);
6029 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6030}
6031
6032
6033/**
6034 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6035 *
6036 * This will update the rSP.
6037 *
6038 * @returns Strict VBox status code.
6039 * @param pIemCpu The IEM per CPU data.
6040 * @param pvMem The pointer returned by
6041 * iemMemStackPushBeginSpecial().
6042 * @param uNewRsp The new RSP value returned by
6043 * iemMemStackPushBeginSpecial().
6044 */
6045static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6046{
6047 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6048 if (rcStrict == VINF_SUCCESS)
6049 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6050 return rcStrict;
6051}
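/* Editorial sketch (not in the original sources): the begin/commit pair maps
 * the whole frame first so that all the bytes can be written before anything,
 * including RSP, is committed.  The frame layout here is made up.  Kept
 * inside '#if 0'.
 */
#if 0
static VBOXSTRICTRC iemExampleSpecialPush(PIEMCPU pIemCpu, uint32_t uEip, uint16_t uCs)
{
    void    *pvFrame;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t *pu32Frame = (uint32_t *)pvFrame;
    pu32Frame[0] = uEip;
    pu32Frame[1] = uCs;
    return iemMemStackPushCommitSpecial(pIemCpu, pvFrame, uNewRsp); /* writes back and updates RSP */
}
#endif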
6052
6053
6054/**
6055 * Begin a special stack pop (used by iret, retf and such).
6056 *
6057 * This will raise \#SS or \#PF if appropriate.
6058 *
6059 * @returns Strict VBox status code.
6060 * @param pIemCpu The IEM per CPU data.
6061 * @param cbMem The number of bytes to pop off the stack.
6062 * @param ppvMem Where to return the pointer to the stack memory.
6063 * @param puNewRsp Where to return the new RSP value. This must be
6064 * passed unchanged to
6065 * iemMemStackPopCommitSpecial() or applied
6066 * manually if iemMemStackPopDoneSpecial() is used.
6067 */
6068static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6069{
6070 Assert(cbMem < UINT8_MAX);
6071 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6072 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
6073 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6074}
6075
6076
6077/**
6078 * Continue a special stack pop (used by iret and retf).
6079 *
6080 * This will raise \#SS or \#PF if appropriate.
6081 *
6082 * @returns Strict VBox status code.
6083 * @param pIemCpu The IEM per CPU data.
6084 * @param cbMem The number of bytes to pop off the stack.
6085 * @param ppvMem Where to return the pointer to the stack memory.
6086 * @param puNewRsp Where to return the new RSP value. This must be
6087 * passed unchanged to
6088 * iemMemStackPopCommitSpecial() or applied
6089 * manually if iemMemStackPopDoneSpecial() is used.
6090 */
6091static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6092{
6093 Assert(cbMem < UINT8_MAX);
6094 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6095 RTUINT64U NewRsp;
6096 NewRsp.u = *puNewRsp;
6097 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
6098 *puNewRsp = NewRsp.u;
6099 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6100}
6101
6102
6103/**
6104 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
6105 *
6106 * This will update the rSP.
6107 *
6108 * @returns Strict VBox status code.
6109 * @param pIemCpu The IEM per CPU data.
6110 * @param pvMem The pointer returned by
6111 * iemMemStackPopBeginSpecial().
6112 * @param uNewRsp The new RSP value returned by
6113 * iemMemStackPopBeginSpecial().
6114 */
6115static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
6116{
6117 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6118 if (rcStrict == VINF_SUCCESS)
6119 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6120 return rcStrict;
6121}
6122
6123
6124/**
6125 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
6126 * iemMemStackPopContinueSpecial).
6127 *
6128 * The caller will manually commit the rSP.
6129 *
6130 * @returns Strict VBox status code.
6131 * @param pIemCpu The IEM per CPU data.
6132 * @param pvMem The pointer returned by
6133 * iemMemStackPopBeginSpecial() or
6134 * iemMemStackPopContinueSpecial().
6135 */
6136static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
6137{
6138 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
6139}
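/* Editorial sketch (not in the original sources): the begin/done pair lets the
 * caller inspect the popped frame and commit RSP manually, e.g. only after
 * further checks have passed.  The frame layout here is made up.  Kept inside
 * '#if 0'.
 */
#if 0
static VBOXSTRICTRC iemExampleSpecialPop(PIEMCPU pIemCpu, uint32_t *puEip, uint16_t *puCs)
{
    void const *pvFrame;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const *pu32Frame = (uint32_t const *)pvFrame;
    *puEip = pu32Frame[0];
    *puCs  = (uint16_t)pu32Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp; /* manual RSP commit */
    return rcStrict;
}
#endif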
6140
6141
6142/**
6143 * Fetches a system table dword.
6144 *
6145 * @returns Strict VBox status code.
6146 * @param pIemCpu The IEM per CPU data.
6147 * @param pu32Dst Where to return the dword.
6148 * @param iSegReg The index of the segment register to use for
6149 * this access. The base and limits are checked.
6150 * @param GCPtrMem The address of the guest memory.
6151 */
6152static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6153{
6154 /* The lazy approach for now... */
6155 uint32_t const *pu32Src;
6156 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6157 if (rc == VINF_SUCCESS)
6158 {
6159 *pu32Dst = *pu32Src;
6160 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
6161 }
6162 return rc;
6163}
6164
6165
6166/**
6167 * Fetches a system table qword.
6168 *
6169 * @returns Strict VBox status code.
6170 * @param pIemCpu The IEM per CPU data.
6171 * @param pu64Dst Where to return the qword.
6172 * @param iSegReg The index of the segment register to use for
6173 * this access. The base and limits are checked.
6174 * @param GCPtrMem The address of the guest memory.
6175 */
6176static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6177{
6178 /* The lazy approach for now... */
6179 uint64_t const *pu64Src;
6180 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
6181 if (rc == VINF_SUCCESS)
6182 {
6183 *pu64Dst = *pu64Src;
6184 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
6185 }
6186 return rc;
6187}
6188
6189
6190/**
6191 * Fetches a descriptor table entry.
6192 *
6193 * @returns Strict VBox status code.
6194 * @param pIemCpu The IEM per CPU.
6195 * @param pDesc Where to return the descriptor table entry.
6196 * @param uSel The selector which table entry to fetch.
6197 */
6198static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)
6199{
6200 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6201
6202 /** @todo did the 286 require all 8 bytes to be accessible? */
6203 /*
6204 * Get the selector table base and check bounds.
6205 */
6206 RTGCPTR GCPtrBase;
6207 if (uSel & X86_SEL_LDT)
6208 {
6209 if ( !pCtx->ldtr.Attr.n.u1Present
6210 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
6211 {
6212 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
6213 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
6214 /** @todo is this the right exception? */
6215 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6216 }
6217
6218 Assert(pCtx->ldtr.Attr.n.u1Present);
6219 GCPtrBase = pCtx->ldtr.u64Base;
6220 }
6221 else
6222 {
6223 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
6224 {
6225 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
6226 /** @todo is this the right exception? */
6227 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6228 }
6229 GCPtrBase = pCtx->gdtr.pGdt;
6230 }
6231
6232 /*
6233 * Read the legacy descriptor and maybe the long mode extensions if
6234 * required.
6235 */
6236 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
6237 if (rcStrict == VINF_SUCCESS)
6238 {
6239 if ( !IEM_IS_LONG_MODE(pIemCpu)
6240 || pDesc->Legacy.Gen.u1DescType)
6241 pDesc->Long.au64[1] = 0;
6242 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
6243 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
6244 else
6245 {
6246 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
6247 /** @todo is this the right exception? */
6248 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
6249 }
6250 }
6251 return rcStrict;
6252}
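/* Worked example (editorial note, not in the original sources): selector
 * 0x002b has RPL=3, TI=1 (LDT) and index 5.  (uSel & X86_SEL_MASK) = 0x28, so
 * the 8-byte entry is read from ldtr.u64Base + 0x28 after checking that
 * (uSel | X86_SEL_RPL_LDT) = 0x2f does not exceed ldtr.u32Limit. */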
6253
6254
6255/**
6256 * Fakes a long mode stack selector for SS = 0.
6257 *
6258 * @param pDescSs Where to return the fake stack descriptor.
6259 * @param uDpl The DPL we want.
6260 */
6261static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
6262{
6263 pDescSs->Long.au64[0] = 0;
6264 pDescSs->Long.au64[1] = 0;
6265 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
6266 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
6267 pDescSs->Long.Gen.u2Dpl = uDpl;
6268 pDescSs->Long.Gen.u1Present = 1;
6269 pDescSs->Long.Gen.u1Long = 1;
6270}
6271
6272
6273/**
6274 * Marks the selector descriptor as accessed (only non-system descriptors).
6275 *
6276 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
6277 * will therefore skip the limit checks.
6278 *
6279 * @returns Strict VBox status code.
6280 * @param pIemCpu The IEM per CPU.
6281 * @param uSel The selector.
6282 */
6283static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
6284{
6285 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6286
6287 /*
6288 * Get the selector table base and calculate the entry address.
6289 */
6290 RTGCPTR GCPtr = uSel & X86_SEL_LDT
6291 ? pCtx->ldtr.u64Base
6292 : pCtx->gdtr.pGdt;
6293 GCPtr += uSel & X86_SEL_MASK;
6294
6295 /*
6296 * ASMAtomicBitSet will assert if the address is misaligned, so do some
6297 * ugly stuff to avoid this. This will make sure it's an atomic access
6298 * as well as more or less remove any question about 8-bit or 32-bit accesses.
6299 */
6300 VBOXSTRICTRC rcStrict;
6301 uint32_t volatile *pu32;
6302 if ((GCPtr & 3) == 0)
6303 {
6304 /* The normal case, map the 32-bit bits around the accessed bit (40). */
6305 GCPtr += 2 + 2;
6306 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6307 if (rcStrict != VINF_SUCCESS)
6308 return rcStrict;
6309 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
6310 }
6311 else
6312 {
6313 /* The misaligned GDT/LDT case, map the whole thing. */
6314 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
6315 if (rcStrict != VINF_SUCCESS)
6316 return rcStrict;
6317 switch ((uintptr_t)pu32 & 3)
6318 {
6319 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
6320 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
6321 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
6322 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
6323 }
6324 }
6325
6326 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
6327}
6328
6329/** @} */
6330
6331
6332/*
6333 * Include the C/C++ implementation of instruction.
6334 */
6335#include "IEMAllCImpl.cpp.h"
6336
6337
6338
6339/** @name "Microcode" macros.
6340 *
6341 * The idea is that we should be able to use the same code to interpret
6342 * instructions as well as to recompile them. Thus this obfuscation.
6343 *
6344 * @{
6345 */
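/* Editorial sketch (not in the original sources): roughly how an instruction
 * body is written with these macros, modelled loosely on a 16-bit
 * register-to-register move.  Kept inside '#if 0' as it is illustration only.
 */
#if 0
    IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX); /* read CX */
        IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value); /* write AX */
        IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif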
6346#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
6347#define IEM_MC_END() }
6348#define IEM_MC_PAUSE() do {} while (0)
6349#define IEM_MC_CONTINUE() do {} while (0)
6350
6351/** Internal macro. */
6352#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
6353 do \
6354 { \
6355 VBOXSTRICTRC rcStrict2 = a_Expr; \
6356 if (rcStrict2 != VINF_SUCCESS) \
6357 return rcStrict2; \
6358 } while (0)
6359
6360#define IEM_MC_ADVANCE_RIP() iemRegUpdateRip(pIemCpu)
6361#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
6362#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
6363#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
6364#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
6365#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
6366#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
6367
6368#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
6369#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
6370 do { \
6371 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
6372 return iemRaiseDeviceNotAvailable(pIemCpu); \
6373 } while (0)
6374#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
6375 do { \
6376 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
6377 return iemRaiseMathFault(pIemCpu); \
6378 } while (0)
6379#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
6380 do { \
6381 if (pIemCpu->uCpl != 0) \
6382 return iemRaiseGeneralProtectionFault0(pIemCpu); \
6383 } while (0)
6384
6385
6386#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
6387#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
6388#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
6389#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
6390#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
6391#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
6392#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
6393 uint32_t a_Name; \
6394 uint32_t *a_pName = &a_Name
6395#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
6396 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
6397
6398#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
6399#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
6400
6401#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6402#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6403#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6404#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
6405#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6406#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6407#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
6408#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6409#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6410#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
6411#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6412#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
6413#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6414#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
6415#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
6416#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
6417#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
6418#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6419#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6420#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
6421#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6422#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
6423#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
6424#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6425#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6426#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
6427#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6428#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6429#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
6430/** @note Not for IOPL or IF testing or modification. */
6431#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6432#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6433#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
6434#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
6435
6436#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
6437#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
6438#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
6439#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
6440#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
6441#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
6442#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
6443#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
6444#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
6445#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
6446#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
6447 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
6448
6449#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
6450#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
6451/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
6452 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
6453#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
6454#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
6455/** @note Not for IOPL or IF testing or modification. */
6456#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
6457
6458#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
6459#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
6460#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
6461 do { \
6462 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6463 *pu32Reg += (a_u32Value); \
6464 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6465 } while (0)
6466#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
6467
6468#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
6469#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
6470#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
6471 do { \
6472 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6473 *pu32Reg -= (a_u32Value); \
6474 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6475 } while (0)
6476#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
6477
6478#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
6479#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
6480#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
6481#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
6482#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
6483#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
6484#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
6485
6486#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
6487#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
6488#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6489#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
6490
6491#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
6492#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
6493#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
6494
6495#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
6496#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6497
6498#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
6499#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
6500#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
6501
6502#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
6503#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
6504#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
6505
6506#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
6507
6508#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
6509
6510#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
6511#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
6512#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
6513 do { \
6514 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6515 *pu32Reg &= (a_u32Value); \
6516 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6517 } while (0)
6518#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
6519
6520#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
6521#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
6522#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
6523 do { \
6524 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
6525 *pu32Reg |= (a_u32Value); \
6526 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
6527 } while (0)
6528#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
6529
6530
6531/** @note Not for IOPL or IF modification. */
6532#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
6533/** @note Not for IOPL or IF modification. */
6534#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
6535/** @note Not for IOPL or IF modification. */
6536#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
6537
6538#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
6539
6540
6541#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
6542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
6543#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
6544 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
6545#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
6546 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
6547
6548#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6549 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
6550#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6551 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6552#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
6553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
6554
6555#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
6557#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6559#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
6560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
6561
6562#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6563 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6564
6565#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6566 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
6567#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
6568 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
6569
6570#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
6571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
6572#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
6573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
6574#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
6575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
6576
6577
6578#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6579 do { \
6580 uint8_t u8Tmp; \
6581 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6582 (a_u16Dst) = u8Tmp; \
6583 } while (0)
6584#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6585 do { \
6586 uint8_t u8Tmp; \
6587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6588 (a_u32Dst) = u8Tmp; \
6589 } while (0)
6590#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6591 do { \
6592 uint8_t u8Tmp; \
6593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6594 (a_u64Dst) = u8Tmp; \
6595 } while (0)
6596#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6597 do { \
6598 uint16_t u16Tmp; \
6599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6600 (a_u32Dst) = u16Tmp; \
6601 } while (0)
6602#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6603 do { \
6604 uint16_t u16Tmp; \
6605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6606 (a_u64Dst) = u16Tmp; \
6607 } while (0)
6608#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6609 do { \
6610 uint32_t u32Tmp; \
6611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6612 (a_u64Dst) = u32Tmp; \
6613 } while (0)
6614
6615#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
6616 do { \
6617 uint8_t u8Tmp; \
6618 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6619 (a_u16Dst) = (int8_t)u8Tmp; \
6620 } while (0)
6621#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6622 do { \
6623 uint8_t u8Tmp; \
6624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6625 (a_u32Dst) = (int8_t)u8Tmp; \
6626 } while (0)
6627#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6628 do { \
6629 uint8_t u8Tmp; \
6630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
6631 (a_u64Dst) = (int8_t)u8Tmp; \
6632 } while (0)
6633#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
6634 do { \
6635 uint16_t u16Tmp; \
6636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6637 (a_u32Dst) = (int16_t)u16Tmp; \
6638 } while (0)
6639#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6640 do { \
6641 uint16_t u16Tmp; \
6642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
6643 (a_u64Dst) = (int16_t)u16Tmp; \
6644 } while (0)
6645#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
6646 do { \
6647 uint32_t u32Tmp; \
6648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
6649 (a_u64Dst) = (int32_t)u32Tmp; \
6650 } while (0)
6651
6652#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
6653 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
6654#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
6655 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
6656#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
6657 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
6658#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
6659 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
6660
6661#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
6662 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
6663#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
6664 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
6665#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
6666 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
6667#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
6668 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
6669
6670#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
6671#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
6672#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
6673#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
6674#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
6675#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
6676#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
6677 do { \
6678 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
6679 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
6680 } while (0)
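/* For reference, the bit patterns used above are the standard "indefinite"
 * (negative) QNaN encodings: sign=1, all-ones exponent, top mantissa bit set,
 * i.e. 0xffc00000 for the 32-bit format, 0xfff8000000000000 for the 64-bit
 * format, and for the 80-bit format an all-ones sign/exponent word (0xffff)
 * with the two top significand bits set (0xc000000000000000). */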
6681
6682
6683#define IEM_MC_PUSH_U16(a_u16Value) \
6684 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
6685#define IEM_MC_PUSH_U32(a_u32Value) \
6686 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
6687#define IEM_MC_PUSH_U64(a_u64Value) \
6688 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
6689
6690#define IEM_MC_POP_U16(a_pu16Value) \
6691 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
6692#define IEM_MC_POP_U32(a_pu32Value) \
6693 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
6694#define IEM_MC_POP_U64(a_pu64Value) \
6695 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
6696
6697/** Maps guest memory for direct or bounce buffered access.
6698 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6699 * @remarks May return.
6700 */
6701#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
6702 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6703
6704/** Maps guest memory for direct or bounce buffered access.
6705 * The purpose is to pass it to an operand implementation, thus the a_iArg.
6706 * @remarks May return.
6707 */
6708#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
6709 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
6710
6711/** Commits the memory and unmaps the guest memory.
6712 * @remarks May return.
6713 */
6714#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
6715 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
6716
6717/** Commits the memory and unmaps the guest memory unless the FPU status word
6718 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
6719 * would prevent the store from being committed.
6720 *
6721 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
6722 * store, while \#P will not.
6723 *
6724 * @remarks May in theory return - for now.
6725 */
6726#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
6727 do { \
6728 if ( !(a_u16FSW & X86_FSW_ES) \
6729 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
6730 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
6731 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
6732 } while (0)
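/* A rough worked example of the condition above (values picked only for
 * illustration): the FSW exception flags and the FCW mask bits share bit
 * positions, so with FSW = X86_FSW_ES | X86_FSW_UE and FCW.UM clear, the
 * expression (FSW & (UE|OE|IE)) & ~(FCW & MASK_ALL) is non-zero, the if()
 * fails and the store is skipped - matching an unmasked #U suppressing the
 * memory write. With FCW.UM set, the UE bit is stripped by the ~FCW term and
 * the commit goes ahead. */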
6733
6734/** Calculate efficient address from R/M. */
6735#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
6736 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))
6737
6738#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
6739#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
6740#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
6741#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
6742#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
6743
6744/**
6745 * Defers the rest of the instruction emulation to a C implementation routine
6746 * and returns, only taking the standard parameters.
6747 *
6748 * @param a_pfnCImpl The pointer to the C routine.
6749 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6750 */
6751#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6752
6753/**
6754 * Defers the rest of instruction emulation to a C implementation routine and
6755 * returns, taking one argument in addition to the standard ones.
6756 *
6757 * @param a_pfnCImpl The pointer to the C routine.
6758 * @param a0 The argument.
6759 */
6760#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6761
6762/**
6763 * Defers the rest of the instruction emulation to a C implementation routine
6764 * and returns, taking two arguments in addition to the standard ones.
6765 *
6766 * @param a_pfnCImpl The pointer to the C routine.
6767 * @param a0 The first extra argument.
6768 * @param a1 The second extra argument.
6769 */
6770#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6771
6772/**
6773 * Defers the rest of the instruction emulation to a C implementation routine
6774 * and returns, taking three arguments in addition to the standard ones.
6775 *
6776 * @param a_pfnCImpl The pointer to the C routine.
6777 * @param a0 The first extra argument.
6778 * @param a1 The second extra argument.
6779 * @param a2 The third extra argument.
6780 */
6781#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
6782
6783/**
6784 * Defers the rest of the instruction emulation to a C implementation routine
6785 * and returns, taking five arguments in addition to the standard ones.
6786 *
6787 * @param a_pfnCImpl The pointer to the C routine.
6788 * @param a0 The first extra argument.
6789 * @param a1 The second extra argument.
6790 * @param a2 The third extra argument.
6791 * @param a3 The fourth extra argument.
6792 * @param a4 The fifth extra argument.
6793 */
6794#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
6795
6796/**
6797 * Defers the entire instruction emulation to a C implementation routine and
6798 * returns, only taking the standard parameters.
6799 *
6800 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6801 *
6802 * @param a_pfnCImpl The pointer to the C routine.
6803 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
6804 */
6805#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
6806
6807/**
6808 * Defers the entire instruction emulation to a C implementation routine and
6809 * returns, taking one argument in addition to the standard ones.
6810 *
6811 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6812 *
6813 * @param a_pfnCImpl The pointer to the C routine.
6814 * @param a0 The argument.
6815 */
6816#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
6817
6818/**
6819 * Defers the entire instruction emulation to a C implementation routine and
6820 * returns, taking two arguments in addition to the standard ones.
6821 *
6822 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6823 *
6824 * @param a_pfnCImpl The pointer to the C routine.
6825 * @param a0 The first extra argument.
6826 * @param a1 The second extra argument.
6827 */
6828#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
6829
6830/**
6831 * Defers the entire instruction emulation to a C implementation routine and
6832 * returns, taking three arguments in addition to the standard ones.
6833 *
6834 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
6835 *
6836 * @param a_pfnCImpl The pointer to the C routine.
6837 * @param a0 The first extra argument.
6838 * @param a1 The second extra argument.
6839 * @param a2 The third extra argument.
6840 */
6841#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
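/* Hypothetical usage sketch for the deferral family above (the real opcode
 * handlers live in IEMAllInstructions.cpp.h); a handler that punts the whole
 * instruction to a C worker would look roughly like:
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */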
6842
6843/**
6844 * Calls a FPU assembly implementation taking one visible argument.
6845 *
6846 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6847 * @param a0 The first extra argument.
6848 */
6849#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
6850 do { \
6851 iemFpuPrepareUsage(pIemCpu); \
6852 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
6853 } while (0)
6854
6855/**
6856 * Calls a FPU assembly implementation taking two visible arguments.
6857 *
6858 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6859 * @param a0 The first extra argument.
6860 * @param a1 The second extra argument.
6861 */
6862#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
6863 do { \
6864 iemFpuPrepareUsage(pIemCpu); \
6865 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
6866 } while (0)
6867
6868/**
6869 * Calls a FPU assembly implementation taking three visible arguments.
6870 *
6871 * @param a_pfnAImpl Pointer to the assembly FPU routine.
6872 * @param a0 The first extra argument.
6873 * @param a1 The second extra argument.
6874 * @param a2 The third extra argument.
6875 */
6876#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
6877 do { \
6878 iemFpuPrepareUsage(pIemCpu); \
6879 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
6880 } while (0)
6881
6882#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
6883 do { \
6884 (a_FpuData).FSW = (a_FSW); \
6885 (a_FpuData).r80Result = *(a_pr80Value); \
6886 } while (0)
6887
6888/** Pushes FPU result onto the stack. */
6889#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
6890 iemFpuPushResult(pIemCpu, &a_FpuData)
6891/** Pushes FPU result onto the stack and sets the FPUDP. */
6892#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
6893 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
6894
6895/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
6896#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
6897 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
6898
6899/** Stores FPU result in a stack register. */
6900#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
6901 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
6902/** Stores FPU result in a stack register and pops the stack. */
6903#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
6904 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
6905/** Stores FPU result in a stack register and sets the FPUDP. */
6906#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6907 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6908/** Stores FPU result in a stack register, sets the FPUDP, and pops the
6909 * stack. */
6910#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
6911 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
6912
6913/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
6914#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
6915 iemFpuUpdateOpcodeAndIp(pIemCpu)
6916/** Free a stack register (for FFREE and FFREEP). */
6917#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
6918 iemFpuStackFree(pIemCpu, a_iStReg)
6919/** Increment the FPU stack pointer. */
6920#define IEM_MC_FPU_STACK_INC_TOP() \
6921 iemFpuStackIncTop(pIemCpu)
6922/** Decrement the FPU stack pointer. */
6923#define IEM_MC_FPU_STACK_DEC_TOP() \
6924 iemFpuStackDecTop(pIemCpu)
6925
6926/** Updates the FSW, FOP, FPUIP, and FPUCS. */
6927#define IEM_MC_UPDATE_FSW(a_u16FSW) \
6928 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6929/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
6930#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
6931 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
6932/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
6933#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6934 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6935/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
6936#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
6937 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
6938/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
6939 * stack. */
6940#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
6941 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
6942/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
6943#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
6944 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
6945
6946/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
6947#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
6948 iemFpuStackUnderflow(pIemCpu, a_iStDst)
6949/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6950 * stack. */
6951#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
6952 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
6953/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6954 * FPUDS. */
6955#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6956 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6957/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
6958 * FPUDS. Pops stack. */
6959#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
6960 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
6961/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
6962 * stack twice. */
6963#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
6964 iemFpuStackUnderflowThenPopPop(pIemCpu)
6965/** Raises a FPU stack underflow exception for an instruction pushing a result
6966 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
6967#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
6968 iemFpuStackPushUnderflow(pIemCpu)
6969/** Raises a FPU stack underflow exception for an instruction pushing a result
6970 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
6971#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
6972 iemFpuStackPushUnderflowTwo(pIemCpu)
6973
6974/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6975 * FPUIP, FPUCS and FOP. */
6976#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
6977 iemFpuStackPushOverflow(pIemCpu)
6978/** Raises a FPU stack overflow exception as part of a push attempt. Sets
6979 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
6980#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
6981 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
6982/** Indicates that we (might) have modified the FPU state. */
6983#define IEM_MC_USED_FPU() \
6984 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
6985
6986/** @note Not for IOPL or IF testing. */
6987#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
6988/** @note Not for IOPL or IF testing. */
6989#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
6990/** @note Not for IOPL or IF testing. */
6991#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
6992/** @note Not for IOPL or IF testing. */
6993#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
6994/** @note Not for IOPL or IF testing. */
6995#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
6996 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
6997 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
6998/** @note Not for IOPL or IF testing. */
6999#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
7000 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7001 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7002/** @note Not for IOPL or IF testing. */
7003#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
7004 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7005 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7006 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7007/** @note Not for IOPL or IF testing. */
7008#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
7009 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
7010 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
7011 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
7012#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
7013#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
7014#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
7015/** @note Not for IOPL or IF testing. */
7016#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7017 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7018 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7019/** @note Not for IOPL or IF testing. */
7020#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7021 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7022 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7023/** @note Not for IOPL or IF testing. */
7024#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
7025 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7026 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7027/** @note Not for IOPL or IF testing. */
7028#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7029 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
7030 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7031/** @note Not for IOPL or IF testing. */
7032#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7033 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
7034 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7035/** @note Not for IOPL or IF testing. */
7036#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
7037 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
7038 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
7039#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
7040#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
7041#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
7042 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
7043#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
7044 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
7045#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
7046 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
7047#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
7048 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
7049#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
7050 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
7051#define IEM_MC_IF_FCW_IM() \
7052 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
7053
7054#define IEM_MC_ELSE() } else {
7055#define IEM_MC_ENDIF() } do {} while (0)
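/* Sketch of how the conditional MC macros above pair up braces once expanded
 * (handler fragment and flag choice are only illustrative):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)    // if (eflags.u & ZF) {
 *          ...taken path...                 //     ...
 *      IEM_MC_ELSE()                        // } else {
 *          ...not-taken path...             //     ...
 *      IEM_MC_ENDIF();                      // } do {} while (0)
 *
 * The trailing do {} while (0) in IEM_MC_ENDIF exists only to swallow the
 * semicolon written after the macro. */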
7056
7057/** @} */
7058
7059
7060/** @name Opcode Debug Helpers.
7061 * @{
7062 */
7063#ifdef DEBUG
7064# define IEMOP_MNEMONIC(a_szMnemonic) \
7065 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7066 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
7067# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
7068 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
7069 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
7070#else
7071# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
7072# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
7073#endif
7074
7075/** @} */
7076
7077
7078/** @name Opcode Helpers.
7079 * @{
7080 */
7081
7082/** The instruction raises an \#UD in real and V8086 mode. */
7083#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
7084 do \
7085 { \
7086 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
7087 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7088 } while (0)
7089
7090/** The instruction allows no lock prefixing (in this encoding); raises \#UD
7091 * if lock prefixed.
7092 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
7093#define IEMOP_HLP_NO_LOCK_PREFIX() \
7094 do \
7095 { \
7096 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7097 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7098 } while (0)
7099
7100/** The instruction is not available in 64-bit mode; raises \#UD if we're in
7101 * 64-bit mode. */
7102#define IEMOP_HLP_NO_64BIT() \
7103 do \
7104 { \
7105 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7106 return IEMOP_RAISE_INVALID_OPCODE(); \
7107 } while (0)
7108
7109/** The instruction defaults to 64-bit operand size in 64-bit mode. */
7110#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
7111 do \
7112 { \
7113 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7114 iemRecalEffOpSize64Default(pIemCpu); \
7115 } while (0)
7116
7117/** The instruction has 64-bit operand size in 64-bit mode. */
7118#define IEMOP_HLP_64BIT_OP_SIZE() \
7119 do \
7120 { \
7121 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
7122 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
7123 } while (0)
7124
7125/**
7126 * Done decoding.
7127 */
7128#define IEMOP_HLP_DONE_DECODING() \
7129 do \
7130 { \
7131 /*nothing for now, maybe later... */ \
7132 } while (0)
7133
7134/**
7135 * Done decoding, raise \#UD exception if lock prefix present.
7136 */
7137#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
7138 do \
7139 { \
7140 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
7141 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
7142 } while (0)
7143
7144
7145/**
7146 * Calculates the effective address of a ModR/M memory operand.
7147 *
7148 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7149 *
7150 * @return Strict VBox status code.
7151 * @param pIemCpu The IEM per CPU data.
7152 * @param bRm The ModRM byte.
7153 * @param pGCPtrEff Where to return the effective address.
7154 */
7155static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
7156{
7157 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7158 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7159#define SET_SS_DEF() \
7160 do \
7161 { \
7162 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7163 pIemCpu->iEffSeg = X86_SREG_SS; \
7164 } while (0)
7165
7166/** @todo Check the effective address size crap! */
7167 switch (pIemCpu->enmEffAddrMode)
7168 {
7169 case IEMMODE_16BIT:
7170 {
7171 uint16_t u16EffAddr;
7172
7173 /* Handle the disp16 form with no registers first. */
7174 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7175 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7176 else
7177 {
7178 /* Get the displacement. */
7179 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7180 {
7181 case 0: u16EffAddr = 0; break;
7182 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7183 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7184 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7185 }
7186
7187 /* Add the base and index registers to the disp. */
7188 switch (bRm & X86_MODRM_RM_MASK)
7189 {
7190 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
7191 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
7192 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
7193 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
7194 case 4: u16EffAddr += pCtx->si; break;
7195 case 5: u16EffAddr += pCtx->di; break;
7196 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
7197 case 7: u16EffAddr += pCtx->bx; break;
7198 }
7199 }
7200
7201 *pGCPtrEff = u16EffAddr;
7202 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
7203 return VINF_SUCCESS;
7204 }
7205
7206 case IEMMODE_32BIT:
7207 {
7208 uint32_t u32EffAddr;
7209
7210 /* Handle the disp32 form with no registers first. */
7211 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7212 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7213 else
7214 {
7215 /* Get the register (or SIB) value. */
7216 switch ((bRm & X86_MODRM_RM_MASK))
7217 {
7218 case 0: u32EffAddr = pCtx->eax; break;
7219 case 1: u32EffAddr = pCtx->ecx; break;
7220 case 2: u32EffAddr = pCtx->edx; break;
7221 case 3: u32EffAddr = pCtx->ebx; break;
7222 case 4: /* SIB */
7223 {
7224 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7225
7226 /* Get the index and scale it. */
7227 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7228 {
7229 case 0: u32EffAddr = pCtx->eax; break;
7230 case 1: u32EffAddr = pCtx->ecx; break;
7231 case 2: u32EffAddr = pCtx->edx; break;
7232 case 3: u32EffAddr = pCtx->ebx; break;
7233 case 4: u32EffAddr = 0; /*none */ break;
7234 case 5: u32EffAddr = pCtx->ebp; break;
7235 case 6: u32EffAddr = pCtx->esi; break;
7236 case 7: u32EffAddr = pCtx->edi; break;
7237 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7238 }
7239 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7240
7241 /* add base */
7242 switch (bSib & X86_SIB_BASE_MASK)
7243 {
7244 case 0: u32EffAddr += pCtx->eax; break;
7245 case 1: u32EffAddr += pCtx->ecx; break;
7246 case 2: u32EffAddr += pCtx->edx; break;
7247 case 3: u32EffAddr += pCtx->ebx; break;
7248 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
7249 case 5:
7250 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7251 {
7252 u32EffAddr += pCtx->ebp;
7253 SET_SS_DEF();
7254 }
7255 else
7256 {
7257 uint32_t u32Disp;
7258 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7259 u32EffAddr += u32Disp;
7260 }
7261 break;
7262 case 6: u32EffAddr += pCtx->esi; break;
7263 case 7: u32EffAddr += pCtx->edi; break;
7264 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7265 }
7266 break;
7267 }
7268 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
7269 case 6: u32EffAddr = pCtx->esi; break;
7270 case 7: u32EffAddr = pCtx->edi; break;
7271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7272 }
7273
7274 /* Get and add the displacement. */
7275 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7276 {
7277 case 0:
7278 break;
7279 case 1:
7280 {
7281 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7282 u32EffAddr += i8Disp;
7283 break;
7284 }
7285 case 2:
7286 {
7287 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7288 u32EffAddr += u32Disp;
7289 break;
7290 }
7291 default:
7292 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
7293 }
7294
7295 }
7296 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
7297 *pGCPtrEff = u32EffAddr;
7298 else
7299 {
7300 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
7301 *pGCPtrEff = u32EffAddr & UINT16_MAX;
7302 }
7303 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7304 return VINF_SUCCESS;
7305 }
7306
7307 case IEMMODE_64BIT:
7308 {
7309 uint64_t u64EffAddr;
7310
7311 /* Handle the rip+disp32 form with no registers first. */
7312 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7313 {
7314 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
7315 u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
7316 }
7317 else
7318 {
7319 /* Get the register (or SIB) value. */
7320 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
7321 {
7322 case 0: u64EffAddr = pCtx->rax; break;
7323 case 1: u64EffAddr = pCtx->rcx; break;
7324 case 2: u64EffAddr = pCtx->rdx; break;
7325 case 3: u64EffAddr = pCtx->rbx; break;
7326 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
7327 case 6: u64EffAddr = pCtx->rsi; break;
7328 case 7: u64EffAddr = pCtx->rdi; break;
7329 case 8: u64EffAddr = pCtx->r8; break;
7330 case 9: u64EffAddr = pCtx->r9; break;
7331 case 10: u64EffAddr = pCtx->r10; break;
7332 case 11: u64EffAddr = pCtx->r11; break;
7333 case 13: u64EffAddr = pCtx->r13; break;
7334 case 14: u64EffAddr = pCtx->r14; break;
7335 case 15: u64EffAddr = pCtx->r15; break;
7336 /* SIB */
7337 case 4:
7338 case 12:
7339 {
7340 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7341
7342 /* Get the index and scale it. */
7343 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
7344 {
7345 case 0: u64EffAddr = pCtx->rax; break;
7346 case 1: u64EffAddr = pCtx->rcx; break;
7347 case 2: u64EffAddr = pCtx->rdx; break;
7348 case 3: u64EffAddr = pCtx->rbx; break;
7349 case 4: u64EffAddr = 0; /*none */ break;
7350 case 5: u64EffAddr = pCtx->rbp; break;
7351 case 6: u64EffAddr = pCtx->rsi; break;
7352 case 7: u64EffAddr = pCtx->rdi; break;
7353 case 8: u64EffAddr = pCtx->r8; break;
7354 case 9: u64EffAddr = pCtx->r9; break;
7355 case 10: u64EffAddr = pCtx->r10; break;
7356 case 11: u64EffAddr = pCtx->r11; break;
7357 case 12: u64EffAddr = pCtx->r12; break;
7358 case 13: u64EffAddr = pCtx->r13; break;
7359 case 14: u64EffAddr = pCtx->r14; break;
7360 case 15: u64EffAddr = pCtx->r15; break;
7361 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7362 }
7363 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7364
7365 /* add base */
7366 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
7367 {
7368 case 0: u64EffAddr += pCtx->rax; break;
7369 case 1: u64EffAddr += pCtx->rcx; break;
7370 case 2: u64EffAddr += pCtx->rdx; break;
7371 case 3: u64EffAddr += pCtx->rbx; break;
7372 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
7373 case 6: u64EffAddr += pCtx->rsi; break;
7374 case 7: u64EffAddr += pCtx->rdi; break;
7375 case 8: u64EffAddr += pCtx->r8; break;
7376 case 9: u64EffAddr += pCtx->r9; break;
7377 case 10: u64EffAddr += pCtx->r10; break;
7378 case 11: u64EffAddr += pCtx->r11; break;
7379 case 14: u64EffAddr += pCtx->r14; break;
7380 case 15: u64EffAddr += pCtx->r15; break;
7381 /* complicated encodings */
7382 case 5:
7383 case 13:
7384 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7385 {
7386 if (!pIemCpu->uRexB)
7387 {
7388 u64EffAddr += pCtx->rbp;
7389 SET_SS_DEF();
7390 }
7391 else
7392 u64EffAddr += pCtx->r13;
7393 }
7394 else
7395 {
7396 uint32_t u32Disp;
7397 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7398 u64EffAddr += (int32_t)u32Disp;
7399 }
7400 break;
7401 }
7402 break;
7403 }
7404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7405 }
7406
7407 /* Get and add the displacement. */
7408 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7409 {
7410 case 0:
7411 break;
7412 case 1:
7413 {
7414 int8_t i8Disp;
7415 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7416 u64EffAddr += i8Disp;
7417 break;
7418 }
7419 case 2:
7420 {
7421 uint32_t u32Disp;
7422 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7423 u64EffAddr += (int32_t)u32Disp;
7424 break;
7425 }
7426 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
7427 }
7428
7429 }
7430 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
7431 *pGCPtrEff = u64EffAddr;
7432 else
7433 *pGCPtrEff = u64EffAddr & UINT32_MAX; /* (32-bit address size override is the only other possibility here) */
7434 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
7435 return VINF_SUCCESS;
7436 }
7437 }
7438
7439 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
7440}
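/* A small worked example of the 16-bit path in the function above (register
 * values made up): bRm = 0x46 decodes as mod=1, rm=6, i.e. [bp+disp8]. With
 * BP=0x1000 and a disp8 of 0x20 the routine returns *pGCPtrEff = 0x1020, and
 * because the encoding uses BP as base, SET_SS_DEF() makes SS the default
 * segment unless a segment prefix was already decoded. */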
7441
7442/** @} */
7443
7444
7445
7446/*
7447 * Include the instructions
7448 */
7449#include "IEMAllInstructions.cpp.h"
7450
7451
7452
7453
7454#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7455
7456/**
7457 * Sets up execution verification mode.
7458 */
7459static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
7460{
7461 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7462 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
7463
7464 /*
7465 * Always note down the address of the current instruction.
7466 */
7467 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
7468 pIemCpu->uOldRip = pOrgCtx->rip;
7469
7470 /*
7471 * Enable verification and/or logging.
7472 */
7473 pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
7474 if ( pIemCpu->fNoRem
7475 && ( 0
7476#if 0 /* auto enable on first paged protected mode interrupt */
7477 || ( pOrgCtx->eflags.Bits.u1IF
7478 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
7479 && TRPMHasTrap(pVCpu)
7480 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
7481#endif
7482#if 0
7483 || ( pOrgCtx->cs.Sel == 0x10
7484 && ( pOrgCtx->rip == 0x90119e3e
7485 || pOrgCtx->rip == 0x901d9810) )
7486#endif
7487#if 0 /* Auto enable DSL - FPU stuff. */
7488 || ( pOrgCtx->cs.Sel == 0x10
7489 && (// pOrgCtx->rip == 0xc02ec07f
7490 //|| pOrgCtx->rip == 0xc02ec082
7491 //|| pOrgCtx->rip == 0xc02ec0c9
7492 0
7493 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
7494#endif
7495#if 0 /* Auto enable DSL - fstp st0 stuff. */
7496 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
7497#endif
7498#if 0
7499 || pOrgCtx->rip == 0x9022bb3a
7500#endif
7501#if 0
7502 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
7503#endif
7504#if 0
7505 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
7506 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
7507#endif
7508#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
7509 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
7510 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
7511 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
7512#endif
7513#if 0 /* NT4SP1 - xadd early boot. */
7514 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
7515#endif
7516#if 0 /* NT4SP1 - wrmsr (intel MSR). */
7517 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
7518#endif
7519#if 0 /* NT4SP1 - cmpxchg (AMD). */
7520 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
7521#endif
7522#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
7523 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
7524#endif
7525#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
7526 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
7527
7528#endif
7529#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
7530 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
7531
7532#endif
7533#if 0 /* NT4SP1 - frstor [ecx] */
7534 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
7535#endif
7536 )
7537 )
7538 {
7539 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
7540 RTLogFlags(NULL, "enabled");
7541 pIemCpu->fNoRem = false;
7542 }
7543
7544 /*
7545 * Switch state.
7546 */
7547 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7548 {
7549 static CPUMCTX s_DebugCtx; /* Ugly! */
7550
7551 s_DebugCtx = *pOrgCtx;
7552 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
7553 }
7554
7555 /*
7556 * See if there is an interrupt pending in TRPM and inject it if we can.
7557 */
7558 pIemCpu->uInjectCpl = UINT8_MAX;
7559 if ( pOrgCtx->eflags.Bits.u1IF
7560 && TRPMHasTrap(pVCpu)
7561 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
7562 {
7563 uint8_t u8TrapNo;
7564 TRPMEVENT enmType;
7565 RTGCUINT uErrCode;
7566 RTGCPTR uCr2;
7567 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2); AssertRC(rc2);
7568 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
7569 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7570 TRPMResetTrap(pVCpu);
7571 pIemCpu->uInjectCpl = pIemCpu->uCpl;
7572 }
7573
7574 /*
7575 * Reset the counters.
7576 */
7577 pIemCpu->cIOReads = 0;
7578 pIemCpu->cIOWrites = 0;
7579 pIemCpu->fIgnoreRaxRdx = false;
7580 pIemCpu->fOverlappingMovs = false;
7581 pIemCpu->fUndefinedEFlags = 0;
7582
7583 if (IEM_VERIFICATION_ENABLED(pIemCpu))
7584 {
7585 /*
7586 * Free all verification records.
7587 */
7588 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
7589 pIemCpu->pIemEvtRecHead = NULL;
7590 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
7591 do
7592 {
7593 while (pEvtRec)
7594 {
7595 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
7596 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
7597 pIemCpu->pFreeEvtRec = pEvtRec;
7598 pEvtRec = pNext;
7599 }
7600 pEvtRec = pIemCpu->pOtherEvtRecHead;
7601 pIemCpu->pOtherEvtRecHead = NULL;
7602 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
7603 } while (pEvtRec);
7604 }
7605}
7606
7607
7608/**
7609 * Allocate an event record.
7610 * @returns Pointer to a record.
7611 */
7612static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
7613{
7614 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7615 return NULL;
7616
7617 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
7618 if (pEvtRec)
7619 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
7620 else
7621 {
7622 if (!pIemCpu->ppIemEvtRecNext)
7623 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
7624
7625 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
7626 if (!pEvtRec)
7627 return NULL;
7628 }
7629 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
7630 pEvtRec->pNext = NULL;
7631 return pEvtRec;
7632}
7633
7634
7635/**
7636 * IOMMMIORead notification.
7637 */
7638VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
7639{
7640 PVMCPU pVCpu = VMMGetCpu(pVM);
7641 if (!pVCpu)
7642 return;
7643 PIEMCPU pIemCpu = &pVCpu->iem.s;
7644 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7645 if (!pEvtRec)
7646 return;
7647 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7648 pEvtRec->u.RamRead.GCPhys = GCPhys;
7649 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
7650 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7651 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7652}
7653
7654
7655/**
7656 * IOMMMIOWrite notification.
7657 */
7658VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
7659{
7660 PVMCPU pVCpu = VMMGetCpu(pVM);
7661 if (!pVCpu)
7662 return;
7663 PIEMCPU pIemCpu = &pVCpu->iem.s;
7664 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7665 if (!pEvtRec)
7666 return;
7667 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7668 pEvtRec->u.RamWrite.GCPhys = GCPhys;
7669 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
7670 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
7671 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
7672 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
7673 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
7674 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7675 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7676}
7677
7678
7679/**
7680 * IOMIOPortRead notification.
7681 */
7682VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
7683{
7684 PVMCPU pVCpu = VMMGetCpu(pVM);
7685 if (!pVCpu)
7686 return;
7687 PIEMCPU pIemCpu = &pVCpu->iem.s;
7688 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7689 if (!pEvtRec)
7690 return;
7691 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7692 pEvtRec->u.IOPortRead.Port = Port;
7693 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7694 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7695 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7696}
7697
7698/**
7699 * IOMIOPortWrite notification.
7700 */
7701VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7702{
7703 PVMCPU pVCpu = VMMGetCpu(pVM);
7704 if (!pVCpu)
7705 return;
7706 PIEMCPU pIemCpu = &pVCpu->iem.s;
7707 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7708 if (!pEvtRec)
7709 return;
7710 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7711 pEvtRec->u.IOPortWrite.Port = Port;
7712 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7713 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7714 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
7715 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
7716}
7717
7718
7719VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
7720{
7721 AssertFailed();
7722}
7723
7724
7725VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
7726{
7727 AssertFailed();
7728}
7729
7730
7731/**
7732 * Fakes and records an I/O port read.
7733 *
7734 * @returns VINF_SUCCESS.
7735 * @param pIemCpu The IEM per CPU data.
7736 * @param Port The I/O port.
7737 * @param pu32Value Where to store the fake value.
7738 * @param cbValue The size of the access.
7739 */
7740static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
7741{
7742 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7743 if (pEvtRec)
7744 {
7745 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
7746 pEvtRec->u.IOPortRead.Port = Port;
7747 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
7748 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7749 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7750 }
7751 pIemCpu->cIOReads++;
7752 *pu32Value = 0xcccccccc;
7753 return VINF_SUCCESS;
7754}
7755
7756
7757/**
7758 * Fakes and records an I/O port write.
7759 *
7760 * @returns VINF_SUCCESS.
7761 * @param pIemCpu The IEM per CPU data.
7762 * @param Port The I/O port.
7763 * @param u32Value The value being written.
7764 * @param cbValue The size of the access.
7765 */
7766static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
7767{
7768 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
7769 if (pEvtRec)
7770 {
7771 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
7772 pEvtRec->u.IOPortWrite.Port = Port;
7773 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
7774 pEvtRec->u.IOPortWrite.u32Value = u32Value;
7775 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
7776 *pIemCpu->ppIemEvtRecNext = pEvtRec;
7777 }
7778 pIemCpu->cIOWrites++;
7779 return VINF_SUCCESS;
7780}
7781
7782
7783/**
7784 * Used to add extra details about a stub case.
7785 * @param pIemCpu The IEM per CPU state.
7786 */
7787static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
7788{
7789 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7790 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7791 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
7792 char szRegs[4096];
7793 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
7794 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
7795 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
7796 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
7797 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
7798 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
7799 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
7800 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
7801 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
7802 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
7803 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
7804 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
7805 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
7806 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
7807 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
7808 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
7809 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
7810 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
7811 " efer=%016VR{efer}\n"
7812 " pat=%016VR{pat}\n"
7813 " sf_mask=%016VR{sf_mask}\n"
7814 "krnl_gs_base=%016VR{krnl_gs_base}\n"
7815 " lstar=%016VR{lstar}\n"
7816 " star=%016VR{star} cstar=%016VR{cstar}\n"
7817 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
7818 );
7819
7820 char szInstr1[256];
7821 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
7822 DBGF_DISAS_FLAGS_DEFAULT_MODE,
7823 szInstr1, sizeof(szInstr1), NULL);
7824 char szInstr2[256];
7825 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,
7826 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
7827 szInstr2, sizeof(szInstr2), NULL);
7828
7829 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
7830}
7831
7832
7833/**
7834 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
7835 * dump to the assertion info.
7836 *
7837 * @param pEvtRec The record to dump.
7838 */
7839static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
7840{
7841 switch (pEvtRec->enmEvent)
7842 {
7843 case IEMVERIFYEVENT_IOPORT_READ:
7844 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
7845 pEvtRec->u.IOPortRead.Port,
7846 pEvtRec->u.IOPortRead.cbValue);
7847 break;
7848 case IEMVERIFYEVENT_IOPORT_WRITE:
7849 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
7850 pEvtRec->u.IOPortWrite.Port,
7851 pEvtRec->u.IOPortWrite.cbValue,
7852 pEvtRec->u.IOPortWrite.u32Value);
7853 break;
7854 case IEMVERIFYEVENT_RAM_READ:
7855 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
7856 pEvtRec->u.RamRead.GCPhys,
7857 pEvtRec->u.RamRead.cb);
7858 break;
7859 case IEMVERIFYEVENT_RAM_WRITE:
7860 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
7861 pEvtRec->u.RamWrite.GCPhys,
7862 pEvtRec->u.RamWrite.cb,
7863 (int)pEvtRec->u.RamWrite.cb,
7864 pEvtRec->u.RamWrite.ab);
7865 break;
7866 default:
7867 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
7868 break;
7869 }
7870}
7871
7872
7873/**
7874 * Raises an assertion on the specified records, showing the given message with
7875 * a record dump attached.
7876 *
7877 * @param pIemCpu The IEM per CPU data.
7878 * @param pEvtRec1 The first record.
7879 * @param pEvtRec2 The second record.
7880 * @param pszMsg The message explaining why we're asserting.
7881 */
7882static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
7883{
7884 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7885 iemVerifyAssertAddRecordDump(pEvtRec1);
7886 iemVerifyAssertAddRecordDump(pEvtRec2);
7887 iemVerifyAssertMsg2(pIemCpu);
7888 RTAssertPanic();
7889}
7890
7891
7892/**
7893 * Raises an assertion on the specified record, showing the given message with
7894 * a record dump attached.
7895 *
7896 * @param pIemCpu The IEM per CPU data.
7897 * @param pEvtRec The record.
7898 * @param pszMsg The message explaining why we're asserting.
7899 */
7900static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
7901{
7902 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7903 iemVerifyAssertAddRecordDump(pEvtRec);
7904 iemVerifyAssertMsg2(pIemCpu);
7905 RTAssertPanic();
7906}
7907
7908
7909/**
7910 * Verifies a write record.
7911 *
7912 * @param pIemCpu The IEM per CPU data.
7913 * @param pEvtRec The write record.
7914 */
7915static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
7916{
7917 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
7918 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
7919 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
7920 if ( RT_FAILURE(rc)
7921 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
7922 {
7923 /* fend off ins */
7924 if ( !pIemCpu->cIOReads
7925 || pEvtRec->u.RamWrite.ab[0] != 0xcc
7926 || ( pEvtRec->u.RamWrite.cb != 1
7927 && pEvtRec->u.RamWrite.cb != 2
7928 && pEvtRec->u.RamWrite.cb != 4) )
7929 {
7930 /* fend off ROMs */
7931 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
7932 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
7933 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
7934 {
7935 /* fend off fxsave */
7936 if (pEvtRec->u.RamWrite.cb != 512)
7937 {
7938 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
7939 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
7940 RTAssertMsg2Add("REM: %.*Rhxs\n"
7941 "IEM: %.*Rhxs\n",
7942 pEvtRec->u.RamWrite.cb, abBuf,
7943 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
7944 iemVerifyAssertAddRecordDump(pEvtRec);
7945 iemVerifyAssertMsg2(pIemCpu);
7946 RTAssertPanic();
7947 }
7948 }
7949 }
7950 }
7951
7952}
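/* The address-range checks in the function above appear to carve out the
 * usual PC ROM windows (an assumption based on the standard memory map):
 * 0xC0000-0xC7FFF for the VGA BIOS, 0xE0000-0xFFFFF for the upper BIOS area,
 * and the 256 KB just below 4 GB where the BIOS flash image is mapped,
 * presumably because writes that land in ROM are silently discarded and so
 * cannot be compared reliably between REM and IEM. */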
7953
7954/**
7955 * Performs the post-execution verification checks.
7956 */
7957static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
7958{
7959 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
7960 return;
7961
7962 /*
7963 * Switch back the state.
7964 */
7965 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
7966 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
7967 Assert(pOrgCtx != pDebugCtx);
7968 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
7969
7970 /*
7971 * Execute the instruction in REM.
7972 */
7973 PVM pVM = IEMCPU_TO_VM(pIemCpu);
7974 EMRemLock(pVM);
7975 int rc = REMR3EmulateInstruction(pVM, IEMCPU_TO_VMCPU(pIemCpu));
7976 AssertRC(rc);
7977 EMRemUnlock(pVM);
7978
7979 /*
7980 * Compare the register states.
7981 */
7982 unsigned cDiffs = 0;
7983 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
7984 {
7985 //Log(("REM and IEM ends up with different registers!\n"));
7986
7987# define CHECK_FIELD(a_Field) \
7988 do \
7989 { \
7990 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
7991 { \
7992 switch (sizeof(pOrgCtx->a_Field)) \
7993 { \
7994 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7995 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7996 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7997 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
7998 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
7999 } \
8000 cDiffs++; \
8001 } \
8002 } while (0)
8003
8004# define CHECK_BIT_FIELD(a_Field) \
8005 do \
8006 { \
8007 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
8008 { \
8009 RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
8010 cDiffs++; \
8011 } \
8012 } while (0)
8013
8014# define CHECK_SEL(a_Sel) \
8015 do \
8016 { \
8017 CHECK_FIELD(a_Sel.Sel); \
8018 CHECK_FIELD(a_Sel.Attr.u); \
8019 CHECK_FIELD(a_Sel.u64Base); \
8020 CHECK_FIELD(a_Sel.u32Limit); \
8021 CHECK_FIELD(a_Sel.fFlags); \
8022 } while (0)
8023
8024 #if 1 /* The recompiler doesn't update these the Intel way. */
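 /* (Copy the IEM values over these fields so the comparison below only flags real differences.) */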
8025 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
8026 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
8027 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
8028 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
8029 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
8030 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
8031 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
8032 pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
8033 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
8034 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
8035#endif
8036 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
8037 {
8038 RTAssertMsg2Weak(" the FPU state differs\n");
8039 cDiffs++;
8040 CHECK_FIELD(fpu.FCW);
8041 CHECK_FIELD(fpu.FSW);
8042 CHECK_FIELD(fpu.FTW);
8043 CHECK_FIELD(fpu.FOP);
8044 CHECK_FIELD(fpu.FPUIP);
8045 CHECK_FIELD(fpu.CS);
8046 CHECK_FIELD(fpu.Rsrvd1);
8047 CHECK_FIELD(fpu.FPUDP);
8048 CHECK_FIELD(fpu.DS);
8049 CHECK_FIELD(fpu.Rsrvd2);
8050 CHECK_FIELD(fpu.MXCSR);
8051 CHECK_FIELD(fpu.MXCSR_MASK);
8052 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
8053 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
8054 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
8055 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
8056 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
8057 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
8058 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
8059 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
8060 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
8061 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
8062 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
8063 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
8064 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
8065 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
8066 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
8067 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
8068 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
8069 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
8070 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
8071 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
8072 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
8073 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
8074 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
8075 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
8076 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
8077 CHECK_FIELD(fpu.au32RsrvdRest[i]);
8078 }
8079 CHECK_FIELD(rip);
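 /* RFLAGS: mask out bits the executed instruction leaves architecturally undefined,
 so they don't produce false mismatches. */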
8080 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
8081 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
8082 {
8083 RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
8084 CHECK_BIT_FIELD(rflags.Bits.u1CF);
8085 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
8086 CHECK_BIT_FIELD(rflags.Bits.u1PF);
8087 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
8088 CHECK_BIT_FIELD(rflags.Bits.u1AF);
8089 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
8090 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
8091 CHECK_BIT_FIELD(rflags.Bits.u1SF);
8092 CHECK_BIT_FIELD(rflags.Bits.u1TF);
8093 CHECK_BIT_FIELD(rflags.Bits.u1IF);
8094 CHECK_BIT_FIELD(rflags.Bits.u1DF);
8095 CHECK_BIT_FIELD(rflags.Bits.u1OF);
8096 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
8097 CHECK_BIT_FIELD(rflags.Bits.u1NT);
8098 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
8099 CHECK_BIT_FIELD(rflags.Bits.u1RF);
8100 CHECK_BIT_FIELD(rflags.Bits.u1VM);
8101 CHECK_BIT_FIELD(rflags.Bits.u1AC);
8102 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
8103 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
8104 CHECK_BIT_FIELD(rflags.Bits.u1ID);
8105 }
8106
8107 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
8108 CHECK_FIELD(rax);
8109 CHECK_FIELD(rcx);
8110 if (!pIemCpu->fIgnoreRaxRdx)
8111 CHECK_FIELD(rdx);
8112 CHECK_FIELD(rbx);
8113 CHECK_FIELD(rsp);
8114 CHECK_FIELD(rbp);
8115 CHECK_FIELD(rsi);
8116 CHECK_FIELD(rdi);
8117 CHECK_FIELD(r8);
8118 CHECK_FIELD(r9);
8119 CHECK_FIELD(r10);
8120 CHECK_FIELD(r11);
8121 CHECK_FIELD(r12);
8122 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
8123 CHECK_SEL(cs);
8124 CHECK_SEL(ss);
8125 CHECK_SEL(ds);
8126 CHECK_SEL(es);
8127 CHECK_SEL(fs);
8128 CHECK_SEL(gs);
8129 CHECK_FIELD(cr0);
8130 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
8131 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
8132 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
8133 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
8134 if (pOrgCtx->cr2 != pDebugCtx->cr2)
8135 {
8136 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3)
8137 { /* ignore */ }
8138 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
8139 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0)
8140 { /* ignore */ }
8141 else
8142 CHECK_FIELD(cr2);
8143 }
8144 CHECK_FIELD(cr3);
8145 CHECK_FIELD(cr4);
8146 CHECK_FIELD(dr[0]);
8147 CHECK_FIELD(dr[1]);
8148 CHECK_FIELD(dr[2]);
8149 CHECK_FIELD(dr[3]);
8150 CHECK_FIELD(dr[6]);
8151 if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
8152 CHECK_FIELD(dr[7]);
8153 CHECK_FIELD(gdtr.cbGdt);
8154 CHECK_FIELD(gdtr.pGdt);
8155 CHECK_FIELD(idtr.cbIdt);
8156 CHECK_FIELD(idtr.pIdt);
8157 CHECK_SEL(ldtr);
8158 CHECK_SEL(tr);
8159 CHECK_FIELD(SysEnter.cs);
8160 CHECK_FIELD(SysEnter.eip);
8161 CHECK_FIELD(SysEnter.esp);
8162 CHECK_FIELD(msrEFER);
8163 CHECK_FIELD(msrSTAR);
8164 CHECK_FIELD(msrPAT);
8165 CHECK_FIELD(msrLSTAR);
8166 CHECK_FIELD(msrCSTAR);
8167 CHECK_FIELD(msrSFMASK);
8168 CHECK_FIELD(msrKERNELGSBASE);
8169
8170 if (cDiffs != 0)
8171 {
8172 DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
8173 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
8174 iemVerifyAssertMsg2(pIemCpu);
8175 RTAssertPanic();
8176 }
8177# undef CHECK_FIELD
8178# undef CHECK_BIT_FIELD
8179 }
8180
8181 /*
8182 * If the register state compared fine, check the verification event
8183 * records.
8184 */
8185 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
8186 {
8187 /*
8188 * Compare verification event records.
8189 * - I/O port accesses should be a 1:1 match.
8190 */
8191 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
8192 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
8193 while (pIemRec && pOtherRec)
8194 {
8195 /* Since we might miss RAM writes and reads, ignore reads and verify that any
8196 extra written memory matches what is actually in guest RAM. */
8197 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
8198 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
8199 && pIemRec->pNext)
8200 {
8201 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8202 iemVerifyWriteRecord(pIemCpu, pIemRec);
8203 pIemRec = pIemRec->pNext;
8204 }
8205
8206 /* Do the compare. */
8207 if (pIemRec->enmEvent != pOtherRec->enmEvent)
8208 {
8209 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
8210 break;
8211 }
8212 bool fEquals;
8213 switch (pIemRec->enmEvent)
8214 {
8215 case IEMVERIFYEVENT_IOPORT_READ:
8216 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
8217 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
8218 break;
8219 case IEMVERIFYEVENT_IOPORT_WRITE:
8220 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
8221 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
8222 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
8223 break;
8224 case IEMVERIFYEVENT_RAM_READ:
8225 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
8226 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
8227 break;
8228 case IEMVERIFYEVENT_RAM_WRITE:
8229 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
8230 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
8231 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
8232 break;
8233 default:
8234 fEquals = false;
8235 break;
8236 }
8237 if (!fEquals)
8238 {
8239 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
8240 break;
8241 }
8242
8243 /* advance */
8244 pIemRec = pIemRec->pNext;
8245 pOtherRec = pOtherRec->pNext;
8246 }
8247
8248 /* Ignore extra writes and reads. */
8249 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
8250 {
8251 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
8252 iemVerifyWriteRecord(pIemCpu, pIemRec);
8253 pIemRec = pIemRec->pNext;
8254 }
8255 if (pIemRec != NULL)
8256 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
8257 else if (pOtherRec != NULL)
8258 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
8259 }
8260 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
8261}
8262
8263#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8264
8265/* stubs */
8266static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8267{
8268 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
8269 return VERR_INTERNAL_ERROR;
8270}
8271
8272static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8273{
8274 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
8275 return VERR_INTERNAL_ERROR;
8276}
8277
8278#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
8279
8280
8281/**
8282 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8283 * IEMExecOneWithPrefetchedByPC.
8284 *
8285 * @return Strict VBox status code.
8286 * @param pVCpu The current virtual CPU.
8287 * @param pIemCpu The IEM per CPU data.
8288 * @param fExecuteInhibit If set, execute the instruction following CLI,
8289 * POP SS and MOV SS,GR.
8290 */
8291DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
8292{
8293 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
8294 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8295 if (rcStrict == VINF_SUCCESS)
8296 pIemCpu->cInstructions++;
8297//#ifdef DEBUG
8298// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
8299//#endif
8300
8301 /* Execute the next instruction as well if a cli, pop ss or
8302 mov ss, Gr has just completed successfully. */
8303 if ( fExecuteInhibit
8304 && rcStrict == VINF_SUCCESS
8305 && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
8306 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
8307 {
8308 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
8309 if (rcStrict == VINF_SUCCESS)
8310 {
8311 IEM_OPCODE_GET_NEXT_U8(&b);
8312 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
8313 if (rcStrict == VINF_SUCCESS)
8314 pIemCpu->cInstructions++;
8315 }
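 /* Note: the bogus PC value below presumably serves to neutralize the interrupt
 inhibition, since it can never match the guest RIP again. */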
8316 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
8317 }
8318
8319 /*
8320 * Return value fiddling and statistics.
8321 */
8322 if (rcStrict != VINF_SUCCESS)
8323 {
8324 if (RT_SUCCESS(rcStrict))
8325 {
8326 AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8327 int32_t const rcPassUp = pIemCpu->rcPassUp;
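 /* A status passed up from a callback takes precedence if it lies outside the EM
 status range or appears to rank higher (lower value) than the current one. */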
8328 if (rcPassUp == VINF_SUCCESS)
8329 pIemCpu->cRetInfStatuses++;
8330 else if ( rcPassUp < VINF_EM_FIRST
8331 || rcPassUp > VINF_EM_LAST
8332 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
8333 {
8334 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8335 pIemCpu->cRetPassUpStatus++;
8336 rcStrict = rcPassUp;
8337 }
8338 else
8339 {
8340 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
8341 pIemCpu->cRetInfStatuses++;
8342 }
8343 }
8344 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
8345 pIemCpu->cRetAspectNotImplemented++;
8346 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
8347 pIemCpu->cRetInstrNotImplemented++;
8348#ifdef IEM_VERIFICATION_MODE_FULL
8349 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
8350 rcStrict = VINF_SUCCESS;
8351#endif
8352 else
8353 pIemCpu->cRetErrStatuses++;
8354 }
8355 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
8356 {
8357 pIemCpu->cRetPassUpStatus++;
8358 rcStrict = pIemCpu->rcPassUp;
8359 }
8360
8361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
8362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
8363#if defined(IEM_VERIFICATION_MODE_FULL)
8364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
8365 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
8366 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
8367 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
8368#endif
8369 return rcStrict;
8370}
8371
8372
8373/**
8374 * Execute one instruction.
8375 *
8376 * @return Strict VBox status code.
8377 * @param pVCpu The current virtual CPU.
8378 */
8379VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
8380{
8381 PIEMCPU pIemCpu = &pVCpu->iem.s;
8382
8383#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8384 iemExecVerificationModeSetup(pIemCpu);
8385#endif
8386#ifdef LOG_ENABLED
8387 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8388# ifdef IN_RING3
8389 if (LogIs2Enabled())
8390 {
8391 char szInstr[256];
8392 uint32_t cbInstr = 0;
8393 DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
8394 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8395 szInstr, sizeof(szInstr), &cbInstr);
8396
8397 Log3(("**** "
8398 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8399 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
8400 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8401 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8402 " %s\n"
8403 ,
8404 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
8405 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
8406 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
8407 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
8408 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
8409 szInstr));
8410
8411 if (LogIs3Enabled())
8412 DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
8413 }
8414 else
8415# endif
8416 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
8417 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
8418#endif
8419
8420 /*
8421 * Do the decoding and emulation.
8422 */
8423 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8424 if (rcStrict == VINF_SUCCESS)
8425 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8426
8427#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8428 /*
8429 * Assert some sanity.
8430 */
8431 iemExecVerificationModeCheck(pIemCpu);
8432#endif
8433 if (rcStrict != VINF_SUCCESS)
8434 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8435 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8436 return rcStrict;
8437}
8438
8439
8440VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8441{
8442 PIEMCPU pIemCpu = &pVCpu->iem.s;
8443 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8444 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8445
8446 iemInitDecoder(pIemCpu, false);
8447 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8448
8449 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8450 if (rcStrict == VINF_SUCCESS)
8451 {
8452 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8453 if (pcbWritten)
8454 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8455 }
8456 return rcStrict;
8457}
8458
8459
8460VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8461 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8462{
8463 PIEMCPU pIemCpu = &pVCpu->iem.s;
8464 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8465 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8466
8467 VBOXSTRICTRC rcStrict;
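 /* If the caller supplied the opcode bytes for the current RIP, use them directly;
 otherwise prefetch from guest memory as usual. */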
8468 if ( cbOpcodeBytes
8469 && pCtx->rip == OpcodeBytesPC)
8470 {
8471 iemInitDecoder(pIemCpu, false);
8472 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8473 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8474 rcStrict = VINF_SUCCESS;
8475 }
8476 else
8477 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
8478 if (rcStrict == VINF_SUCCESS)
8479 {
8480 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
8481 }
8482 return rcStrict;
8483}
8484
8485
8486VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
8487{
8488 PIEMCPU pIemCpu = &pVCpu->iem.s;
8489 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8490 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8491
8492 iemInitDecoder(pIemCpu, true);
8493 uint32_t const cbOldWritten = pIemCpu->cbWritten;
8494
8495 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8496 if (rcStrict == VINF_SUCCESS)
8497 {
8498 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8499 if (pcbWritten)
8500 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
8501 }
8502 return rcStrict;
8503}
8504
8505
8506VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
8507 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
8508{
8509 PIEMCPU pIemCpu = &pVCpu->iem.s;
8510 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8511 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
8512
8513 VBOXSTRICTRC rcStrict;
8514 if ( cbOpcodeBytes
8515 && pCtx->rip == OpcodeBytesPC)
8516 {
8517 iemInitDecoder(pIemCpu, true);
8518 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
8519 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
8520 rcStrict = VINF_SUCCESS;
8521 }
8522 else
8523 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
8524 if (rcStrict == VINF_SUCCESS)
8525 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
8526 return rcStrict;
8527}
8528
8529
8530VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
8531{
8532 return IEMExecOne(pVCpu);
8533}
8534
8535
8536
8537/**
8538 * Injects a trap, fault, abort, software interrupt or external interrupt.
8539 *
8540 * The parameter list matches TRPMQueryTrapAll pretty closely.
8541 *
8542 * @returns Strict VBox status code.
8543 * @param pVCpu The current virtual CPU.
8544 * @param u8TrapNo The trap number.
8545 * @param enmType What type is it (trap/fault/abort), software
8546 * interrupt or hardware interrupt.
8547 * @param uErrCode The error code if applicable.
8548 * @param uCr2 The CR2 value if applicable.
8549 */
8550VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
8551{
8552 iemInitDecoder(&pVCpu->iem.s, false);
8553
8554 uint32_t fFlags;
8555 switch (enmType)
8556 {
8557 case TRPM_HARDWARE_INT:
8558 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
8559 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
8560 uErrCode = uCr2 = 0;
8561 break;
8562
8563 case TRPM_SOFTWARE_INT:
8564 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
8565 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
8566 uErrCode = uCr2 = 0;
8567 break;
8568
8569 case TRPM_TRAP:
8570 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
8571 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
8572 if (u8TrapNo == X86_XCPT_PF)
8573 fFlags |= IEM_XCPT_FLAGS_CR2;
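 /* Exception vectors that push an error code. */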
8574 switch (u8TrapNo)
8575 {
8576 case X86_XCPT_DF:
8577 case X86_XCPT_TS:
8578 case X86_XCPT_NP:
8579 case X86_XCPT_SS:
 case X86_XCPT_GP:
8580 case X86_XCPT_PF:
8581 case X86_XCPT_AC:
8582 fFlags |= IEM_XCPT_FLAGS_ERR;
8583 break;
8584 }
8585 break;
8586
8587 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8588 }
8589
8590 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
8591}
8592
8593
8594VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
8595{
8596 return VERR_NOT_IMPLEMENTED;
8597}
8598
8599
8600VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
8601{
8602 return VERR_NOT_IMPLEMENTED;
8603}
8604
8605
8606#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
8607/**
8608 * Executes an IRET instruction with the default operand size.
8609 *
8610 * This is for PATM.
8611 *
8612 * @returns VBox status code.
8613 * @param pVCpu The current virtual CPU.
8614 * @param pCtxCore The register frame.
8615 */
8616VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
8617{
8618 PIEMCPU pIemCpu = &pVCpu->iem.s;
8619 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
8620
8621 iemCtxCoreToCtx(pCtx, pCtxCore);
8622 iemInitDecoder(pIemCpu);
8623 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
8624 if (rcStrict == VINF_SUCCESS)
8625 iemCtxToCtxCore(pCtxCore, pCtx);
8626 else
8627 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
8628 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
8629 return rcStrict;
8630}
8631#endif
8632